python_code
stringlengths 0
679k
| repo_name
stringlengths 9
41
| file_path
stringlengths 6
149
|
---|---|---|
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/__init__.py |
|
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""IVA unet dllogger codes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/dllogger/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Logger functions."""
from abc import ABC, abstractmethod
import atexit
from collections import defaultdict
from datetime import datetime
import json
class Backend(ABC):
    """Abstract base class for logging backends with a verbosity level."""

    def __init__(self, verbosity):
        """Store the backend's verbosity level.

        Args:
            verbosity: Threshold controlling how much this backend logs.
        """
        self._verbosity = verbosity

    @property
    def verbosity(self):
        """Verbosity level this backend was constructed with."""
        return self._verbosity

    @abstractmethod
    def log(self, timestamp, elapsedtime, step, data):
        """Record one step's worth of metric data."""

    @abstractmethod
    def metadata(self, timestamp, elapsedtime, metric, metadata):
        """Record metadata describing a metric."""
class Verbosity:
    """Verbosity levels understood by the logging backends."""

    OFF = -1      # suppress all output
    DEFAULT = 0   # standard logging
    VERBOSE = 1   # extra detail
class Logger:
    """Dispatches step data and metric metadata to a set of backends."""

    def __init__(self, backends):
        """Create a logger.

        Args:
            backends (list): Backend instances (e.g. StdOutBackend,
                JSONStreamBackend) that receive every record.
        """
        self.backends = backends
        atexit.register(self.flush)
        self.starttime = datetime.now()

    def metadata(self, metric, metadata):
        """Forward metric metadata to every backend."""
        now = datetime.now()
        elapsed = (now - self.starttime).total_seconds()
        for backend in self.backends:
            backend.metadata(now, elapsed, metric, metadata)

    def log(self, step, data, verbosity=1):
        """Forward a step's data to each backend verbose enough to accept it."""
        now = datetime.now()
        elapsed = (now - self.starttime).total_seconds()
        for backend in self.backends:
            if backend.verbosity >= verbosity:
                backend.log(now, elapsed, step, data)

    def flush(self):
        """Flush every backend (also registered to run at interpreter exit)."""
        for backend in self.backends:
            backend.flush()
def default_step_format(step):
    """Render the step value as a plain string."""
    return "%s" % (step,)
def default_metric_format(metric, metadata, value):
    """Render one metric as ``name:value unit``.

    Args:
        metric (str): Metric name.
        metadata (dict): Optional ``unit`` and ``format`` entries; ``format``
            is a format spec such as ``:.3f`` controlling value rendering.
        value: Metric value; ``None`` is rendered literally as ``None``.

    Returns:
        str: The formatted ``metric:value unit`` string (unit may be empty,
        leaving a trailing space, as downstream log parsers expect).
    """
    unit = metadata.get("unit", "")
    # Build e.g. "{:.3f}" from a ":.3f" spec; fall back to plain "{}".
    value_fmt = "{" + metadata["format"] + "}" if "format" in metadata else "{}"
    return "{}:{} {}".format(
        metric, value_fmt.format(value) if value is not None else value, unit
    )
def default_prefix_format(timestamp):
    """Build the ``DLL <timestamp> - `` prefix for stdout log lines."""
    return "DLL " + str(timestamp) + " - "
class StdOutBackend(Backend):
    """Backend that prints formatted log lines to stdout."""

    def __init__(
        self,
        verbosity,
        step_format=default_step_format,
        metric_format=default_metric_format,
        prefix_format=default_prefix_format,
    ):
        """Create a stdout backend.

        Args:
            verbosity: Verbosity threshold for this backend.
            step_format (callable): Renders the step value as a string.
            metric_format (callable): Renders one (metric, metadata, value).
            prefix_format (callable): Renders the line prefix from a timestamp.
        """
        super().__init__(verbosity=verbosity)
        self._metadata = defaultdict(dict)
        self.step_format = step_format
        self.metric_format = metric_format
        self.prefix_format = prefix_format
        self.elapsed = 0.0  # NOTE(review): never read after init — confirm before removing

    def metadata(self, timestamp, elapsedtime, metric, metadata):
        """Remember metadata so later log() calls can format the metric."""
        self._metadata[metric].update(metadata)

    def log(self, timestamp, elapsedtime, step, data):
        """Print one line: prefix, step, formatted metrics, elapsed time."""
        metrics = " ".join(
            self.metric_format(name, self._metadata[name], value)
            for name, value in data.items()
        )
        print(
            "{}{} {}{}".format(
                self.prefix_format(timestamp),
                self.step_format(step),
                metrics,
                "elapsed_time:" + str(elapsedtime),
            )
        )

    def flush(self):
        """Nothing is buffered here; stdout flushing is left to the interpreter."""
        pass
class JSONStreamBackend(Backend):
    """Backend that writes ``DLLL``-prefixed JSON records to a file."""

    def __init__(self, verbosity, filename):
        """Open the output file and register its cleanup at exit.

        Args:
            verbosity: Verbosity threshold for this backend.
            filename (str): Path of the log file to (over)write.
        """
        super().__init__(verbosity=verbosity)
        self._filename = filename
        self.file = open(filename, "w")
        atexit.register(self.file.close)

    def _write_record(self, record):
        # Every record is a single "DLLL {json}" line; key order in the
        # dict is preserved by json.dumps and matters to log parsers.
        self.file.write("DLLL {}\n".format(json.dumps(record)))

    def metadata(self, timestamp, elapsedtime, metric, metadata):
        """Write a METADATA record describing one metric."""
        self._write_record(
            dict(
                timestamp=str(timestamp.timestamp()),
                elapsedtime=str(elapsedtime),
                datetime=str(timestamp),
                type="METADATA",
                metric=metric,
                metadata=metadata,
            )
        )

    def log(self, timestamp, elapsedtime, step, data):
        """Write a LOG record for one training step."""
        self._write_record(
            dict(
                timestamp=str(timestamp.timestamp()),
                datetime=str(timestamp),
                elapsedtime=str(elapsedtime),
                type="LOG",
                step=step,
                data=data,
            )
        )

    def flush(self):
        """Flush buffered records to disk."""
        self.file.flush()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/dllogger/logger.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A launcher script for Unet tasks inside a runtime container."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from maglev_sdk.docker_container.entrypoint import main
if __name__ == '__main__':
    # Delegate to the maglev container entrypoint, pointing it at the
    # unet task scripts directory inside the runtime container.
    main('unet', 'nvidia_tao_tf1/cv/unet/scripts')
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/docker/unet.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/unet/proto/data_class_config.proto
# NOTE: regenerate with protoc from the .proto file; manual edits are lost.
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


# File descriptor carrying the serialized .proto definition.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='nvidia_tao_tf1/cv/unet/proto/data_class_config.proto',
  package='',
  syntax='proto3',
  serialized_options=None,
  serialized_pb=_b('\n4nvidia_tao_tf1/cv/unet/proto/data_class_config.proto\"\xa3\x01\n\x0f\x44\x61taClassConfig\x12\x34\n\x0etarget_classes\x18\x01 \x03(\x0b\x32\x1c.DataClassConfig.TargetClass\x1aZ\n\x0bTargetClass\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x63lass_weight\x18\x02 \x01(\x02\x12\x10\n\x08label_id\x18\x03 \x01(\x05\x12\x15\n\rmapping_class\x18\x04 \x01(\tb\x06proto3')
)


# Descriptor for the nested TargetClass message
# (fields: name, class_weight, label_id, mapping_class).
_DATACLASSCONFIG_TARGETCLASS = _descriptor.Descriptor(
  name='TargetClass',
  full_name='DataClassConfig.TargetClass',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='DataClassConfig.TargetClass.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='class_weight', full_name='DataClassConfig.TargetClass.class_weight', index=1,
      number=2, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='label_id', full_name='DataClassConfig.TargetClass.label_id', index=2,
      number=3, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='mapping_class', full_name='DataClassConfig.TargetClass.mapping_class', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=130,
  serialized_end=220,
)

# Descriptor for the top-level DataClassConfig message
# (repeated target_classes of TargetClass).
_DATACLASSCONFIG = _descriptor.Descriptor(
  name='DataClassConfig',
  full_name='DataClassConfig',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='target_classes', full_name='DataClassConfig.target_classes', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[_DATACLASSCONFIG_TARGETCLASS, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=57,
  serialized_end=220,
)

# Wire up cross-references that could not be resolved at construction time.
_DATACLASSCONFIG_TARGETCLASS.containing_type = _DATACLASSCONFIG
_DATACLASSCONFIG.fields_by_name['target_classes'].message_type = _DATACLASSCONFIG_TARGETCLASS
DESCRIPTOR.message_types_by_name['DataClassConfig'] = _DATACLASSCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Concrete message classes are synthesized from the descriptors via reflection.
DataClassConfig = _reflection.GeneratedProtocolMessageType('DataClassConfig', (_message.Message,), dict(

  TargetClass = _reflection.GeneratedProtocolMessageType('TargetClass', (_message.Message,), dict(
    DESCRIPTOR = _DATACLASSCONFIG_TARGETCLASS,
    __module__ = 'nvidia_tao_tf1.cv.unet.proto.data_class_config_pb2'
    # @@protoc_insertion_point(class_scope:DataClassConfig.TargetClass)
    ))
  ,
  DESCRIPTOR = _DATACLASSCONFIG,
  __module__ = 'nvidia_tao_tf1.cv.unet.proto.data_class_config_pb2'
  # @@protoc_insertion_point(class_scope:DataClassConfig)
  ))
_sym_db.RegisterMessage(DataClassConfig)
_sym_db.RegisterMessage(DataClassConfig.TargetClass)


# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/proto/data_class_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/unet/proto/optimizer_config.proto
# NOTE: regenerate with protoc from the .proto file; manual edits are lost.
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


from nvidia_tao_tf1.cv.unet.proto import adam_optimizer_config_pb2 as nvidia__tao__tf1_dot_cv_dot_unet_dot_proto_dot_adam__optimizer__config__pb2


# File descriptor; depends on adam_optimizer_config.proto for the
# AdamOptimizerConfig message referenced below.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='nvidia_tao_tf1/cv/unet/proto/optimizer_config.proto',
  package='',
  syntax='proto3',
  serialized_options=None,
  serialized_pb=_b('\n3nvidia_tao_tf1/cv/unet/proto/optimizer_config.proto\x1a\x38nvidia_tao_tf1/cv/unet/proto/adam_optimizer_config.proto\"D\n\x0fOptimizerConfig\x12$\n\x04\x61\x64\x61m\x18\x01 \x01(\x0b\x32\x14.AdamOptimizerConfigH\x00\x42\x0b\n\toptimizerb\x06proto3')
  ,
  dependencies=[nvidia__tao__tf1_dot_cv_dot_unet_dot_proto_dot_adam__optimizer__config__pb2.DESCRIPTOR,])


# Descriptor for OptimizerConfig: a single 'optimizer' oneof currently
# holding only the 'adam' AdamOptimizerConfig alternative.
_OPTIMIZERCONFIG = _descriptor.Descriptor(
  name='OptimizerConfig',
  full_name='OptimizerConfig',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='adam', full_name='OptimizerConfig.adam', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
    _descriptor.OneofDescriptor(
      name='optimizer', full_name='OptimizerConfig.optimizer',
      index=0, containing_type=None, fields=[]),
  ],
  serialized_start=113,
  serialized_end=181,
)

# Wire the 'adam' field into the imported message type and the oneof.
_OPTIMIZERCONFIG.fields_by_name['adam'].message_type = nvidia__tao__tf1_dot_cv_dot_unet_dot_proto_dot_adam__optimizer__config__pb2._ADAMOPTIMIZERCONFIG
_OPTIMIZERCONFIG.oneofs_by_name['optimizer'].fields.append(
  _OPTIMIZERCONFIG.fields_by_name['adam'])
_OPTIMIZERCONFIG.fields_by_name['adam'].containing_oneof = _OPTIMIZERCONFIG.oneofs_by_name['optimizer']
DESCRIPTOR.message_types_by_name['OptimizerConfig'] = _OPTIMIZERCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Concrete message class synthesized from the descriptor via reflection.
OptimizerConfig = _reflection.GeneratedProtocolMessageType('OptimizerConfig', (_message.Message,), dict(
  DESCRIPTOR = _OPTIMIZERCONFIG,
  __module__ = 'nvidia_tao_tf1.cv.unet.proto.optimizer_config_pb2'
  # @@protoc_insertion_point(class_scope:OptimizerConfig)
  ))
_sym_db.RegisterMessage(OptimizerConfig)


# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/proto/optimizer_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/unet/proto/adam_optimizer_config.proto
# NOTE: regenerate with protoc from the .proto file; manual edits are lost.
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


# File descriptor carrying the serialized .proto definition.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='nvidia_tao_tf1/cv/unet/proto/adam_optimizer_config.proto',
  package='',
  syntax='proto3',
  serialized_options=None,
  serialized_pb=_b('\n8nvidia_tao_tf1/cv/unet/proto/adam_optimizer_config.proto\"D\n\x13\x41\x64\x61mOptimizerConfig\x12\x0f\n\x07\x65psilon\x18\x01 \x01(\x02\x12\r\n\x05\x62\x65ta1\x18\x02 \x01(\x02\x12\r\n\x05\x62\x65ta2\x18\x03 \x01(\x02\x62\x06proto3')
)


# Descriptor for AdamOptimizerConfig (float fields: epsilon, beta1, beta2).
_ADAMOPTIMIZERCONFIG = _descriptor.Descriptor(
  name='AdamOptimizerConfig',
  full_name='AdamOptimizerConfig',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='epsilon', full_name='AdamOptimizerConfig.epsilon', index=0,
      number=1, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='beta1', full_name='AdamOptimizerConfig.beta1', index=1,
      number=2, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='beta2', full_name='AdamOptimizerConfig.beta2', index=2,
      number=3, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=60,
  serialized_end=128,
)

DESCRIPTOR.message_types_by_name['AdamOptimizerConfig'] = _ADAMOPTIMIZERCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Concrete message class synthesized from the descriptor via reflection.
AdamOptimizerConfig = _reflection.GeneratedProtocolMessageType('AdamOptimizerConfig', (_message.Message,), dict(
  DESCRIPTOR = _ADAMOPTIMIZERCONFIG,
  __module__ = 'nvidia_tao_tf1.cv.unet.proto.adam_optimizer_config_pb2'
  # @@protoc_insertion_point(class_scope:AdamOptimizerConfig)
  ))
_sym_db.RegisterMessage(AdamOptimizerConfig)


# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/proto/adam_optimizer_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/unet/proto/training_config.proto
# NOTE: regenerate with protoc from the .proto file; manual edits are lost.
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


from nvidia_tao_tf1.cv.unet.proto import optimizer_config_pb2 as nvidia__tao__tf1_dot_cv_dot_unet_dot_proto_dot_optimizer__config__pb2
from nvidia_tao_tf1.cv.unet.proto import regularizer_config_pb2 as nvidia__tao__tf1_dot_cv_dot_unet_dot_proto_dot_regularizer__config__pb2
from nvidia_tao_tf1.cv.unet.proto import visualizer_config_pb2 as nvidia__tao__tf1_dot_cv_dot_unet_dot_proto_dot_visualizer__config__pb2


# File descriptor; depends on the optimizer, regularizer and visualizer
# config protos for the message types referenced by TrainingConfig.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='nvidia_tao_tf1/cv/unet/proto/training_config.proto',
  package='',
  syntax='proto3',
  serialized_options=None,
  serialized_pb=_b('\n2nvidia_tao_tf1/cv/unet/proto/training_config.proto\x1a\x33nvidia_tao_tf1/cv/unet/proto/optimizer_config.proto\x1a\x35nvidia_tao_tf1/cv/unet/proto/regularizer_config.proto\x1a\x34nvidia_tao_tf1/cv/unet/proto/visualizer_config.proto\"\xd9\x06\n\x0eTrainingConfig\x12\x12\n\nbatch_size\x18\x01 \x01(\r\x12\x12\n\nnum_epochs\x18\x02 \x01(\r\x12\'\n\x0bregularizer\x18\x04 \x01(\x0b\x32\x12.RegularizerConfig\x12#\n\toptimizer\x18\x05 \x01(\x0b\x32\x10.OptimizerConfig\x12\x1b\n\x13\x63heckpoint_interval\x18\x07 \x01(\r\x12\x11\n\tmax_steps\x18\x08 \x01(\r\x12\x0e\n\x06\x65pochs\x18\x13 \x01(\r\x12\x19\n\x11log_summary_steps\x18\t \x01(\r\x12\x0f\n\x07\x61ugment\x18\n \x01(\x08\x12\x0f\n\x07use_xla\x18\x0b \x01(\x08\x12\x14\n\x0cwarmup_steps\x18\x0c \x01(\r\x12\x0f\n\x07use_amp\x18\r \x01(\x08\x12\x15\n\rlearning_rate\x18\x0e \x01(\x02\x12\x14\n\x0cweight_decay\x18\x0f \x01(\x02\x12\x0f\n\x07use_trt\x18\x10 \x01(\x08\x12\x1b\n\x13\x63rossvalidation_idx\x18\x11 \x01(\x08\x12\x0c\n\x04loss\x18\x12 \x01(\t\x12\x17\n\x0fweights_monitor\x18\x17 \x01(\x08\x12\x37\n\x0clr_scheduler\x18\x19 \x01(\x0b\x32!.TrainingConfig.LRSchedulerConfig\x12%\n\nvisualizer\x18\x1b \x01(\x0b\x32\x11.VisualizerConfig\x12\x13\n\x0b\x62uffer_size\x18\x1c \x01(\r\x12\x14\n\x0c\x64\x61ta_options\x18\x1d \x01(\x08\x1a\x37\n\x11\x43osineDecayConfig\x12\r\n\x05\x61lpha\x18\x01 \x01(\x02\x12\x13\n\x0b\x64\x65\x63\x61y_steps\x18\x02 \x01(\x05\x1a\x41\n\x16\x45xponentialDecayConfig\x12\x12\n\ndecay_rate\x18\x01 \x01(\x02\x12\x13\n\x0b\x64\x65\x63\x61y_steps\x18\x02 \x01(\x05\x1a\xa3\x01\n\x11LRSchedulerConfig\x12\x43\n\x11\x65xponential_decay\x18\x01 \x01(\x0b\x32&.TrainingConfig.ExponentialDecayConfigH\x00\x12\x39\n\x0c\x63osine_decay\x18\x02 \x01(\x0b\x32!.TrainingConfig.CosineDecayConfigH\x00\x42\x0e\n\x0clr_schedulerb\x06proto3')
  ,
  dependencies=[nvidia__tao__tf1_dot_cv_dot_unet_dot_proto_dot_optimizer__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_unet_dot_proto_dot_regularizer__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_unet_dot_proto_dot_visualizer__config__pb2.DESCRIPTOR,])


# Descriptor for nested TrainingConfig.CosineDecayConfig (alpha, decay_steps).
_TRAININGCONFIG_COSINEDECAYCONFIG = _descriptor.Descriptor(
  name='CosineDecayConfig',
  full_name='TrainingConfig.CosineDecayConfig',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='alpha', full_name='TrainingConfig.CosineDecayConfig.alpha', index=0,
      number=1, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='decay_steps', full_name='TrainingConfig.CosineDecayConfig.decay_steps', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=786,
  serialized_end=841,
)

# Descriptor for nested TrainingConfig.ExponentialDecayConfig
# (decay_rate, decay_steps).
_TRAININGCONFIG_EXPONENTIALDECAYCONFIG = _descriptor.Descriptor(
  name='ExponentialDecayConfig',
  full_name='TrainingConfig.ExponentialDecayConfig',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='decay_rate', full_name='TrainingConfig.ExponentialDecayConfig.decay_rate', index=0,
      number=1, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='decay_steps', full_name='TrainingConfig.ExponentialDecayConfig.decay_steps', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=843,
  serialized_end=908,
)

# Descriptor for nested TrainingConfig.LRSchedulerConfig: a 'lr_scheduler'
# oneof holding either exponential_decay or cosine_decay.
_TRAININGCONFIG_LRSCHEDULERCONFIG = _descriptor.Descriptor(
  name='LRSchedulerConfig',
  full_name='TrainingConfig.LRSchedulerConfig',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='exponential_decay', full_name='TrainingConfig.LRSchedulerConfig.exponential_decay', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='cosine_decay', full_name='TrainingConfig.LRSchedulerConfig.cosine_decay', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
    _descriptor.OneofDescriptor(
      name='lr_scheduler', full_name='TrainingConfig.LRSchedulerConfig.lr_scheduler',
      index=0, containing_type=None, fields=[]),
  ],
  serialized_start=911,
  serialized_end=1074,
)

# Descriptor for the top-level TrainingConfig message.
_TRAININGCONFIG = _descriptor.Descriptor(
  name='TrainingConfig',
  full_name='TrainingConfig',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='batch_size', full_name='TrainingConfig.batch_size', index=0,
      number=1, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='num_epochs', full_name='TrainingConfig.num_epochs', index=1,
      number=2, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='regularizer', full_name='TrainingConfig.regularizer', index=2,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='optimizer', full_name='TrainingConfig.optimizer', index=3,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='checkpoint_interval', full_name='TrainingConfig.checkpoint_interval', index=4,
      number=7, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='max_steps', full_name='TrainingConfig.max_steps', index=5,
      number=8, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='epochs', full_name='TrainingConfig.epochs', index=6,
      number=19, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='log_summary_steps', full_name='TrainingConfig.log_summary_steps', index=7,
      number=9, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='augment', full_name='TrainingConfig.augment', index=8,
      number=10, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='use_xla', full_name='TrainingConfig.use_xla', index=9,
      number=11, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='warmup_steps', full_name='TrainingConfig.warmup_steps', index=10,
      number=12, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='use_amp', full_name='TrainingConfig.use_amp', index=11,
      number=13, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='learning_rate', full_name='TrainingConfig.learning_rate', index=12,
      number=14, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='weight_decay', full_name='TrainingConfig.weight_decay', index=13,
      number=15, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='use_trt', full_name='TrainingConfig.use_trt', index=14,
      number=16, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='crossvalidation_idx', full_name='TrainingConfig.crossvalidation_idx', index=15,
      number=17, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='loss', full_name='TrainingConfig.loss', index=16,
      number=18, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='weights_monitor', full_name='TrainingConfig.weights_monitor', index=17,
      number=23, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='lr_scheduler', full_name='TrainingConfig.lr_scheduler', index=18,
      number=25, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='visualizer', full_name='TrainingConfig.visualizer', index=19,
      number=27, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='buffer_size', full_name='TrainingConfig.buffer_size', index=20,
      number=28, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='data_options', full_name='TrainingConfig.data_options', index=21,
      number=29, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[_TRAININGCONFIG_COSINEDECAYCONFIG, _TRAININGCONFIG_EXPONENTIALDECAYCONFIG, _TRAININGCONFIG_LRSCHEDULERCONFIG, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=217,
  serialized_end=1074,
)

# Wire up cross-references between the nested descriptors, the oneof,
# and the message types imported from the dependency modules.
_TRAININGCONFIG_COSINEDECAYCONFIG.containing_type = _TRAININGCONFIG
_TRAININGCONFIG_EXPONENTIALDECAYCONFIG.containing_type = _TRAININGCONFIG
_TRAININGCONFIG_LRSCHEDULERCONFIG.fields_by_name['exponential_decay'].message_type = _TRAININGCONFIG_EXPONENTIALDECAYCONFIG
_TRAININGCONFIG_LRSCHEDULERCONFIG.fields_by_name['cosine_decay'].message_type = _TRAININGCONFIG_COSINEDECAYCONFIG
_TRAININGCONFIG_LRSCHEDULERCONFIG.containing_type = _TRAININGCONFIG
_TRAININGCONFIG_LRSCHEDULERCONFIG.oneofs_by_name['lr_scheduler'].fields.append(
  _TRAININGCONFIG_LRSCHEDULERCONFIG.fields_by_name['exponential_decay'])
_TRAININGCONFIG_LRSCHEDULERCONFIG.fields_by_name['exponential_decay'].containing_oneof = _TRAININGCONFIG_LRSCHEDULERCONFIG.oneofs_by_name['lr_scheduler']
_TRAININGCONFIG_LRSCHEDULERCONFIG.oneofs_by_name['lr_scheduler'].fields.append(
  _TRAININGCONFIG_LRSCHEDULERCONFIG.fields_by_name['cosine_decay'])
_TRAININGCONFIG_LRSCHEDULERCONFIG.fields_by_name['cosine_decay'].containing_oneof = _TRAININGCONFIG_LRSCHEDULERCONFIG.oneofs_by_name['lr_scheduler']
_TRAININGCONFIG.fields_by_name['regularizer'].message_type = nvidia__tao__tf1_dot_cv_dot_unet_dot_proto_dot_regularizer__config__pb2._REGULARIZERCONFIG
_TRAININGCONFIG.fields_by_name['optimizer'].message_type = nvidia__tao__tf1_dot_cv_dot_unet_dot_proto_dot_optimizer__config__pb2._OPTIMIZERCONFIG
_TRAININGCONFIG.fields_by_name['lr_scheduler'].message_type = _TRAININGCONFIG_LRSCHEDULERCONFIG
_TRAININGCONFIG.fields_by_name['visualizer'].message_type = nvidia__tao__tf1_dot_cv_dot_unet_dot_proto_dot_visualizer__config__pb2._VISUALIZERCONFIG
DESCRIPTOR.message_types_by_name['TrainingConfig'] = _TRAININGCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Concrete message classes (TrainingConfig plus its three nested config
# messages) are synthesized from the descriptors via reflection.
TrainingConfig = _reflection.GeneratedProtocolMessageType('TrainingConfig', (_message.Message,), dict(

  CosineDecayConfig = _reflection.GeneratedProtocolMessageType('CosineDecayConfig', (_message.Message,), dict(
    DESCRIPTOR = _TRAININGCONFIG_COSINEDECAYCONFIG,
    __module__ = 'nvidia_tao_tf1.cv.unet.proto.training_config_pb2'
    # @@protoc_insertion_point(class_scope:TrainingConfig.CosineDecayConfig)
    ))
  ,

  ExponentialDecayConfig = _reflection.GeneratedProtocolMessageType('ExponentialDecayConfig', (_message.Message,), dict(
    DESCRIPTOR = _TRAININGCONFIG_EXPONENTIALDECAYCONFIG,
    __module__ = 'nvidia_tao_tf1.cv.unet.proto.training_config_pb2'
    # @@protoc_insertion_point(class_scope:TrainingConfig.ExponentialDecayConfig)
    ))
  ,

  LRSchedulerConfig = _reflection.GeneratedProtocolMessageType('LRSchedulerConfig', (_message.Message,), dict(
    DESCRIPTOR = _TRAININGCONFIG_LRSCHEDULERCONFIG,
    __module__ = 'nvidia_tao_tf1.cv.unet.proto.training_config_pb2'
    # @@protoc_insertion_point(class_scope:TrainingConfig.LRSchedulerConfig)
    ))
  ,
  DESCRIPTOR = _TRAININGCONFIG,
  __module__ = 'nvidia_tao_tf1.cv.unet.proto.training_config_pb2'
  # @@protoc_insertion_point(class_scope:TrainingConfig)
  ))
_sym_db.RegisterMessage(TrainingConfig)
_sym_db.RegisterMessage(TrainingConfig.CosineDecayConfig)
_sym_db.RegisterMessage(TrainingConfig.ExponentialDecayConfig)
_sym_db.RegisterMessage(TrainingConfig.LRSchedulerConfig)


# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/proto/training_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/unet/proto/regularizer_config.proto
#
# NOTE(review): protoc-generated module defining the RegularizerConfig proto3
# message (fields: enum `type`, float `weight`). Change the .proto and
# regenerate rather than editing this file.
import sys
# On Python 2, byte strings are already bytes; on Python 3 the serialized
# descriptor literals must be latin-1 encoded back to bytes.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# File-level descriptor; serialized_pb is the wire-encoded FileDescriptorProto.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='nvidia_tao_tf1/cv/unet/proto/regularizer_config.proto',
  package='',
  syntax='proto3',
  serialized_options=None,
  serialized_pb=_b('\n5nvidia_tao_tf1/cv/unet/proto/regularizer_config.proto\"\x8a\x01\n\x11RegularizerConfig\x12\x33\n\x04type\x18\x01 \x01(\x0e\x32%.RegularizerConfig.RegularizationType\x12\x0e\n\x06weight\x18\x02 \x01(\x02\"0\n\x12RegularizationType\x12\n\n\x06NO_REG\x10\x00\x12\x06\n\x02L1\x10\x01\x12\x06\n\x02L2\x10\x02\x62\x06proto3')
)
# Nested enum RegularizerConfig.RegularizationType: NO_REG=0, L1=1, L2=2.
_REGULARIZERCONFIG_REGULARIZATIONTYPE = _descriptor.EnumDescriptor(
  name='RegularizationType',
  full_name='RegularizerConfig.RegularizationType',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='NO_REG', index=0, number=0,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='L1', index=1, number=1,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='L2', index=2, number=2,
      serialized_options=None,
      type=None),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=148,
  serialized_end=196,
)
_sym_db.RegisterEnumDescriptor(_REGULARIZERCONFIG_REGULARIZATIONTYPE)
# Message descriptor for RegularizerConfig.
_REGULARIZERCONFIG = _descriptor.Descriptor(
  name='RegularizerConfig',
  full_name='RegularizerConfig',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='type', full_name='RegularizerConfig.type', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='weight', full_name='RegularizerConfig.weight', index=1,
      number=2, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _REGULARIZERCONFIG_REGULARIZATIONTYPE,
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=58,
  serialized_end=196,
)
# Cross-wire the enum field to its enum descriptor and register everything.
_REGULARIZERCONFIG.fields_by_name['type'].enum_type = _REGULARIZERCONFIG_REGULARIZATIONTYPE
_REGULARIZERCONFIG_REGULARIZATIONTYPE.containing_type = _REGULARIZERCONFIG
DESCRIPTOR.message_types_by_name['RegularizerConfig'] = _REGULARIZERCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Build and register the concrete message class via protobuf reflection.
RegularizerConfig = _reflection.GeneratedProtocolMessageType('RegularizerConfig', (_message.Message,), dict(
  DESCRIPTOR = _REGULARIZERCONFIG,
  __module__ = 'nvidia_tao_tf1.cv.unet.proto.regularizer_config_pb2'
  # @@protoc_insertion_point(class_scope:RegularizerConfig)
  ))
_sym_db.RegisterMessage(RegularizerConfig)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/proto/regularizer_config_pb2.py |
# Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
"""Defining protocol buffers for different components of GB."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/proto/__init__.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/unet/proto/augmentation_config.proto
#
# NOTE(review): protoc-generated module defining AugmentationConfig with two
# nested messages: SpatialAugmentation (flip/crop-resize probabilities) and
# BrightnessAugmentation (delta). Regenerate from the .proto to change it.
import sys
# On Python 2, byte strings are already bytes; on Python 3 the serialized
# descriptor literals must be latin-1 encoded back to bytes.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# File-level descriptor; serialized_pb is the wire-encoded FileDescriptorProto.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='nvidia_tao_tf1/cv/unet/proto/augmentation_config.proto',
  package='',
  syntax='proto3',
  serialized_options=None,
  serialized_pb=_b('\n6nvidia_tao_tf1/cv/unet/proto/augmentation_config.proto\"\xdc\x02\n\x12\x41ugmentationConfig\x12\x45\n\x14spatial_augmentation\x18\x02 \x01(\x0b\x32\'.AugmentationConfig.SpatialAugmentation\x12K\n\x17\x62rightness_augmentation\x18\x03 \x01(\x0b\x32*.AugmentationConfig.BrightnessAugmentation\x1a\x88\x01\n\x13SpatialAugmentation\x12\x19\n\x11hflip_probability\x18\x01 \x01(\x02\x12\x19\n\x11vflip_probability\x18\x02 \x01(\x02\x12\x1c\n\x14\x63rop_and_resize_prob\x18\x03 \x01(\x02\x12\x1d\n\x15\x63rop_and_resize_ratio\x18\x04 \x01(\x02\x1a\'\n\x16\x42rightnessAugmentation\x12\r\n\x05\x64\x65lta\x18\x01 \x01(\x02\x62\x06proto3')
)
# Nested message AugmentationConfig.SpatialAugmentation (four float fields).
_AUGMENTATIONCONFIG_SPATIALAUGMENTATION = _descriptor.Descriptor(
  name='SpatialAugmentation',
  full_name='AugmentationConfig.SpatialAugmentation',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='hflip_probability', full_name='AugmentationConfig.SpatialAugmentation.hflip_probability', index=0,
      number=1, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='vflip_probability', full_name='AugmentationConfig.SpatialAugmentation.vflip_probability', index=1,
      number=2, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='crop_and_resize_prob', full_name='AugmentationConfig.SpatialAugmentation.crop_and_resize_prob', index=2,
      number=3, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='crop_and_resize_ratio', full_name='AugmentationConfig.SpatialAugmentation.crop_and_resize_ratio', index=3,
      number=4, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=230,
  serialized_end=366,
)
# Nested message AugmentationConfig.BrightnessAugmentation (single float).
_AUGMENTATIONCONFIG_BRIGHTNESSAUGMENTATION = _descriptor.Descriptor(
  name='BrightnessAugmentation',
  full_name='AugmentationConfig.BrightnessAugmentation',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='delta', full_name='AugmentationConfig.BrightnessAugmentation.delta', index=0,
      number=1, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=368,
  serialized_end=407,
)
# Top-level message AugmentationConfig with two message-typed fields.
_AUGMENTATIONCONFIG = _descriptor.Descriptor(
  name='AugmentationConfig',
  full_name='AugmentationConfig',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='spatial_augmentation', full_name='AugmentationConfig.spatial_augmentation', index=0,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='brightness_augmentation', full_name='AugmentationConfig.brightness_augmentation', index=1,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[_AUGMENTATIONCONFIG_SPATIALAUGMENTATION, _AUGMENTATIONCONFIG_BRIGHTNESSAUGMENTATION, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=59,
  serialized_end=407,
)
# Cross-wire nested types to their container and fields to their messages.
_AUGMENTATIONCONFIG_SPATIALAUGMENTATION.containing_type = _AUGMENTATIONCONFIG
_AUGMENTATIONCONFIG_BRIGHTNESSAUGMENTATION.containing_type = _AUGMENTATIONCONFIG
_AUGMENTATIONCONFIG.fields_by_name['spatial_augmentation'].message_type = _AUGMENTATIONCONFIG_SPATIALAUGMENTATION
_AUGMENTATIONCONFIG.fields_by_name['brightness_augmentation'].message_type = _AUGMENTATIONCONFIG_BRIGHTNESSAUGMENTATION
DESCRIPTOR.message_types_by_name['AugmentationConfig'] = _AUGMENTATIONCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Build the concrete message classes (outer + nested) via protobuf reflection.
AugmentationConfig = _reflection.GeneratedProtocolMessageType('AugmentationConfig', (_message.Message,), dict(
  SpatialAugmentation = _reflection.GeneratedProtocolMessageType('SpatialAugmentation', (_message.Message,), dict(
    DESCRIPTOR = _AUGMENTATIONCONFIG_SPATIALAUGMENTATION,
    __module__ = 'nvidia_tao_tf1.cv.unet.proto.augmentation_config_pb2'
    # @@protoc_insertion_point(class_scope:AugmentationConfig.SpatialAugmentation)
    ))
  ,
  BrightnessAugmentation = _reflection.GeneratedProtocolMessageType('BrightnessAugmentation', (_message.Message,), dict(
    DESCRIPTOR = _AUGMENTATIONCONFIG_BRIGHTNESSAUGMENTATION,
    __module__ = 'nvidia_tao_tf1.cv.unet.proto.augmentation_config_pb2'
    # @@protoc_insertion_point(class_scope:AugmentationConfig.BrightnessAugmentation)
    ))
  ,
  DESCRIPTOR = _AUGMENTATIONCONFIG,
  __module__ = 'nvidia_tao_tf1.cv.unet.proto.augmentation_config_pb2'
  # @@protoc_insertion_point(class_scope:AugmentationConfig)
  ))
# Register the generated classes so parsers can resolve them by full name.
_sym_db.RegisterMessage(AugmentationConfig)
_sym_db.RegisterMessage(AugmentationConfig.SpatialAugmentation)
_sym_db.RegisterMessage(AugmentationConfig.BrightnessAugmentation)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/proto/augmentation_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/unet/proto/visualizer_config.proto
#
# NOTE(review): protoc-generated module defining VisualizerConfig (enable
# flag, summary-step intervals, and embedded WandB / ClearML configs imported
# from the common proto package). Regenerate from the .proto to change it.
import sys
# On Python 2, byte strings are already bytes; on Python 3 the serialized
# descriptor literals must be latin-1 encoded back to bytes.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# Generated modules for the imported .proto dependencies.
from nvidia_tao_tf1.cv.common.proto import wandb_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_wandb__config__pb2
from nvidia_tao_tf1.cv.common.proto import clearml_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_clearml__config__pb2
# File-level descriptor; serialized_pb is the wire-encoded FileDescriptorProto.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='nvidia_tao_tf1/cv/unet/proto/visualizer_config.proto',
  package='',
  syntax='proto3',
  serialized_options=None,
  serialized_pb=_b('\n4nvidia_tao_tf1/cv/unet/proto/visualizer_config.proto\x1a\x31nvidia_tao_tf1/cv/common/proto/wandb_config.proto\x1a\x33nvidia_tao_tf1/cv/common/proto/clearml_config.proto\"\xb2\x01\n\x10VisualizerConfig\x12\x0f\n\x07\x65nabled\x18\x01 \x01(\x08\x12\x1a\n\x12save_summary_steps\x18\x02 \x01(\r\x12%\n\x1dinfrequent_save_summary_steps\x18\x03 \x01(\r\x12\"\n\x0cwandb_config\x18\x04 \x01(\x0b\x32\x0c.WandBConfig\x12&\n\x0e\x63learml_config\x18\x05 \x01(\x0b\x32\x0e.ClearMLConfigb\x06proto3')
  ,
  dependencies=[nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_wandb__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_clearml__config__pb2.DESCRIPTOR,])
# Message descriptor for VisualizerConfig.
_VISUALIZERCONFIG = _descriptor.Descriptor(
  name='VisualizerConfig',
  full_name='VisualizerConfig',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='enabled', full_name='VisualizerConfig.enabled', index=0,
      number=1, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='save_summary_steps', full_name='VisualizerConfig.save_summary_steps', index=1,
      number=2, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='infrequent_save_summary_steps', full_name='VisualizerConfig.infrequent_save_summary_steps', index=2,
      number=3, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='wandb_config', full_name='VisualizerConfig.wandb_config', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='clearml_config', full_name='VisualizerConfig.clearml_config', index=4,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=161,
  serialized_end=339,
)
# Wire message-typed fields to the descriptors from the imported modules.
_VISUALIZERCONFIG.fields_by_name['wandb_config'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_wandb__config__pb2._WANDBCONFIG
_VISUALIZERCONFIG.fields_by_name['clearml_config'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_clearml__config__pb2._CLEARMLCONFIG
DESCRIPTOR.message_types_by_name['VisualizerConfig'] = _VISUALIZERCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Build and register the concrete message class via protobuf reflection.
VisualizerConfig = _reflection.GeneratedProtocolMessageType('VisualizerConfig', (_message.Message,), dict(
  DESCRIPTOR = _VISUALIZERCONFIG,
  __module__ = 'nvidia_tao_tf1.cv.unet.proto.visualizer_config_pb2'
  # @@protoc_insertion_point(class_scope:VisualizerConfig)
  ))
_sym_db.RegisterMessage(VisualizerConfig)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/proto/visualizer_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/unet/proto/experiment.proto
#
# NOTE(review): protoc-generated module defining the top-level Experiment
# message for the UNet spec: a random seed plus the model / dataset /
# evaluation / training / data-class sub-configs. Regenerate from the .proto.
import sys
# On Python 2, byte strings are already bytes; on Python 3 the serialized
# descriptor literals must be latin-1 encoded back to bytes.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# Generated modules for the imported .proto dependencies.
from nvidia_tao_tf1.cv.unet.proto import dataset_config_pb2 as nvidia__tao__tf1_dot_cv_dot_unet_dot_proto_dot_dataset__config__pb2
from nvidia_tao_tf1.cv.unet.proto import evaluation_config_pb2 as nvidia__tao__tf1_dot_cv_dot_unet_dot_proto_dot_evaluation__config__pb2
from nvidia_tao_tf1.cv.unet.proto import model_config_pb2 as nvidia__tao__tf1_dot_cv_dot_unet_dot_proto_dot_model__config__pb2
from nvidia_tao_tf1.cv.unet.proto import training_config_pb2 as nvidia__tao__tf1_dot_cv_dot_unet_dot_proto_dot_training__config__pb2
from nvidia_tao_tf1.cv.unet.proto import data_class_config_pb2 as nvidia__tao__tf1_dot_cv_dot_unet_dot_proto_dot_data__class__config__pb2
# File-level descriptor; serialized_pb is the wire-encoded FileDescriptorProto.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='nvidia_tao_tf1/cv/unet/proto/experiment.proto',
  package='',
  syntax='proto3',
  serialized_options=None,
  serialized_pb=_b('\n-nvidia_tao_tf1/cv/unet/proto/experiment.proto\x1a\x31nvidia_tao_tf1/cv/unet/proto/dataset_config.proto\x1a\x34nvidia_tao_tf1/cv/unet/proto/evaluation_config.proto\x1a/nvidia_tao_tf1/cv/unet/proto/model_config.proto\x1a\x32nvidia_tao_tf1/cv/unet/proto/training_config.proto\x1a\x34nvidia_tao_tf1/cv/unet/proto/data_class_config.proto\"\xf2\x01\n\nExperiment\x12\x13\n\x0brandom_seed\x18\x01 \x01(\r\x12\"\n\x0cmodel_config\x18\x05 \x01(\x0b\x32\x0c.ModelConfig\x12&\n\x0e\x64\x61taset_config\x18\x02 \x01(\x0b\x32\x0e.DatasetConfig\x12,\n\x11\x65valuation_config\x18\x06 \x01(\x0b\x32\x11.EvaluationConfig\x12(\n\x0ftraining_config\x18\t \x01(\x0b\x32\x0f.TrainingConfig\x12+\n\x11\x64\x61ta_class_config\x18\n \x01(\x0b\x32\x10.DataClassConfigb\x06proto3')
  ,
  dependencies=[nvidia__tao__tf1_dot_cv_dot_unet_dot_proto_dot_dataset__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_unet_dot_proto_dot_evaluation__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_unet_dot_proto_dot_model__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_unet_dot_proto_dot_training__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_unet_dot_proto_dot_data__class__config__pb2.DESCRIPTOR,])
# Message descriptor for Experiment.
_EXPERIMENT = _descriptor.Descriptor(
  name='Experiment',
  full_name='Experiment',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='random_seed', full_name='Experiment.random_seed', index=0,
      number=1, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='model_config', full_name='Experiment.model_config', index=1,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='dataset_config', full_name='Experiment.dataset_config', index=2,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='evaluation_config', full_name='Experiment.evaluation_config', index=3,
      number=6, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='training_config', full_name='Experiment.training_config', index=4,
      number=9, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='data_class_config', full_name='Experiment.data_class_config', index=5,
      number=10, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=310,
  serialized_end=552,
)
# Wire each sub-config field to the descriptor exported by its module.
_EXPERIMENT.fields_by_name['model_config'].message_type = nvidia__tao__tf1_dot_cv_dot_unet_dot_proto_dot_model__config__pb2._MODELCONFIG
_EXPERIMENT.fields_by_name['dataset_config'].message_type = nvidia__tao__tf1_dot_cv_dot_unet_dot_proto_dot_dataset__config__pb2._DATASETCONFIG
_EXPERIMENT.fields_by_name['evaluation_config'].message_type = nvidia__tao__tf1_dot_cv_dot_unet_dot_proto_dot_evaluation__config__pb2._EVALUATIONCONFIG
_EXPERIMENT.fields_by_name['training_config'].message_type = nvidia__tao__tf1_dot_cv_dot_unet_dot_proto_dot_training__config__pb2._TRAININGCONFIG
_EXPERIMENT.fields_by_name['data_class_config'].message_type = nvidia__tao__tf1_dot_cv_dot_unet_dot_proto_dot_data__class__config__pb2._DATACLASSCONFIG
DESCRIPTOR.message_types_by_name['Experiment'] = _EXPERIMENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Build and register the concrete message class via protobuf reflection.
Experiment = _reflection.GeneratedProtocolMessageType('Experiment', (_message.Message,), dict(
  DESCRIPTOR = _EXPERIMENT,
  __module__ = 'nvidia_tao_tf1.cv.unet.proto.experiment_pb2'
  # @@protoc_insertion_point(class_scope:Experiment)
  ))
_sym_db.RegisterMessage(Experiment)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/proto/experiment_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/unet/proto/evaluation_config.proto
#
# NOTE(review): protoc-generated module defining EvaluationConfig, including
# two proto3 map fields (realized below as synthetic *Entry nested messages
# with the map_entry option), a nested EvaluationBoxConfig message, and the
# AP_MODE enum. Regenerate from the .proto to change it.
import sys
# On Python 2, byte strings are already bytes; on Python 3 the serialized
# descriptor literals must be latin-1 encoded back to bytes.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# File-level descriptor; serialized_pb is the wire-encoded FileDescriptorProto.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='nvidia_tao_tf1/cv/unet/proto/evaluation_config.proto',
  package='',
  syntax='proto3',
  serialized_options=None,
  serialized_pb=_b('\n4nvidia_tao_tf1/cv/unet/proto/evaluation_config.proto\"\x97\x05\n\x10\x45valuationConfig\x12)\n!validation_period_during_training\x18\x01 \x01(\r\x12\x1e\n\x16\x66irst_validation_epoch\x18\x02 \x01(\r\x12i\n&minimum_detection_ground_truth_overlap\x18\x03 \x03(\x0b\x32\x39.EvaluationConfig.MinimumDetectionGroundTruthOverlapEntry\x12I\n\x15\x65valuation_box_config\x18\x04 \x03(\x0b\x32*.EvaluationConfig.EvaluationBoxConfigEntry\x12\x39\n\x16\x61verage_precision_mode\x18\x05 \x01(\x0e\x32\x19.EvaluationConfig.AP_MODE\x1aI\n\'MinimumDetectionGroundTruthOverlapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x02:\x02\x38\x01\x1as\n\x13\x45valuationBoxConfig\x12\x16\n\x0eminimum_height\x18\x01 \x01(\x05\x12\x16\n\x0emaximum_height\x18\x02 \x01(\x05\x12\x15\n\rminimum_width\x18\x03 \x01(\x05\x12\x15\n\rmaximum_width\x18\x04 \x01(\x05\x1a\x61\n\x18\x45valuationBoxConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x34\n\x05value\x18\x02 \x01(\x0b\x32%.EvaluationConfig.EvaluationBoxConfig:\x02\x38\x01\"$\n\x07\x41P_MODE\x12\n\n\x06SAMPLE\x10\x00\x12\r\n\tINTEGRATE\x10\x01\x62\x06proto3')
)
# Nested enum EvaluationConfig.AP_MODE: SAMPLE=0, INTEGRATE=1.
_EVALUATIONCONFIG_AP_MODE = _descriptor.EnumDescriptor(
  name='AP_MODE',
  full_name='EvaluationConfig.AP_MODE',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='SAMPLE', index=0, number=0,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='INTEGRATE', index=1, number=1,
      serialized_options=None,
      type=None),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=684,
  serialized_end=720,
)
_sym_db.RegisterEnumDescriptor(_EVALUATIONCONFIG_AP_MODE)
# Synthetic map-entry message for the map<string, float>
# minimum_detection_ground_truth_overlap field (serialized_options '8\001'
# marks it as a map entry).
_EVALUATIONCONFIG_MINIMUMDETECTIONGROUNDTRUTHOVERLAPENTRY = _descriptor.Descriptor(
  name='MinimumDetectionGroundTruthOverlapEntry',
  full_name='EvaluationConfig.MinimumDetectionGroundTruthOverlapEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='EvaluationConfig.MinimumDetectionGroundTruthOverlapEntry.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='value', full_name='EvaluationConfig.MinimumDetectionGroundTruthOverlapEntry.value', index=1,
      number=2, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=_b('8\001'),
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=393,
  serialized_end=466,
)
# Nested message EvaluationConfig.EvaluationBoxConfig (four int32 fields).
_EVALUATIONCONFIG_EVALUATIONBOXCONFIG = _descriptor.Descriptor(
  name='EvaluationBoxConfig',
  full_name='EvaluationConfig.EvaluationBoxConfig',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='minimum_height', full_name='EvaluationConfig.EvaluationBoxConfig.minimum_height', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='maximum_height', full_name='EvaluationConfig.EvaluationBoxConfig.maximum_height', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='minimum_width', full_name='EvaluationConfig.EvaluationBoxConfig.minimum_width', index=2,
      number=3, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='maximum_width', full_name='EvaluationConfig.EvaluationBoxConfig.maximum_width', index=3,
      number=4, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=468,
  serialized_end=583,
)
# Synthetic map-entry message for the map<string, EvaluationBoxConfig>
# evaluation_box_config field (again marked map_entry via '8\001').
_EVALUATIONCONFIG_EVALUATIONBOXCONFIGENTRY = _descriptor.Descriptor(
  name='EvaluationBoxConfigEntry',
  full_name='EvaluationConfig.EvaluationBoxConfigEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='EvaluationConfig.EvaluationBoxConfigEntry.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='value', full_name='EvaluationConfig.EvaluationBoxConfigEntry.value', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=_b('8\001'),
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=585,
  serialized_end=682,
)
# Top-level message EvaluationConfig.
_EVALUATIONCONFIG = _descriptor.Descriptor(
  name='EvaluationConfig',
  full_name='EvaluationConfig',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='validation_period_during_training', full_name='EvaluationConfig.validation_period_during_training', index=0,
      number=1, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='first_validation_epoch', full_name='EvaluationConfig.first_validation_epoch', index=1,
      number=2, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='minimum_detection_ground_truth_overlap', full_name='EvaluationConfig.minimum_detection_ground_truth_overlap', index=2,
      number=3, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='evaluation_box_config', full_name='EvaluationConfig.evaluation_box_config', index=3,
      number=4, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='average_precision_mode', full_name='EvaluationConfig.average_precision_mode', index=4,
      number=5, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[_EVALUATIONCONFIG_MINIMUMDETECTIONGROUNDTRUTHOVERLAPENTRY, _EVALUATIONCONFIG_EVALUATIONBOXCONFIG, _EVALUATIONCONFIG_EVALUATIONBOXCONFIGENTRY, ],
  enum_types=[
    _EVALUATIONCONFIG_AP_MODE,
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=57,
  serialized_end=720,
)
# Cross-wire nested types, map-entry value types, and the enum field.
_EVALUATIONCONFIG_MINIMUMDETECTIONGROUNDTRUTHOVERLAPENTRY.containing_type = _EVALUATIONCONFIG
_EVALUATIONCONFIG_EVALUATIONBOXCONFIG.containing_type = _EVALUATIONCONFIG
_EVALUATIONCONFIG_EVALUATIONBOXCONFIGENTRY.fields_by_name['value'].message_type = _EVALUATIONCONFIG_EVALUATIONBOXCONFIG
_EVALUATIONCONFIG_EVALUATIONBOXCONFIGENTRY.containing_type = _EVALUATIONCONFIG
_EVALUATIONCONFIG.fields_by_name['minimum_detection_ground_truth_overlap'].message_type = _EVALUATIONCONFIG_MINIMUMDETECTIONGROUNDTRUTHOVERLAPENTRY
_EVALUATIONCONFIG.fields_by_name['evaluation_box_config'].message_type = _EVALUATIONCONFIG_EVALUATIONBOXCONFIGENTRY
_EVALUATIONCONFIG.fields_by_name['average_precision_mode'].enum_type = _EVALUATIONCONFIG_AP_MODE
_EVALUATIONCONFIG_AP_MODE.containing_type = _EVALUATIONCONFIG
DESCRIPTOR.message_types_by_name['EvaluationConfig'] = _EVALUATIONCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Build the concrete message classes (outer + nested) via protobuf reflection.
EvaluationConfig = _reflection.GeneratedProtocolMessageType('EvaluationConfig', (_message.Message,), dict(
  MinimumDetectionGroundTruthOverlapEntry = _reflection.GeneratedProtocolMessageType('MinimumDetectionGroundTruthOverlapEntry', (_message.Message,), dict(
    DESCRIPTOR = _EVALUATIONCONFIG_MINIMUMDETECTIONGROUNDTRUTHOVERLAPENTRY,
    __module__ = 'nvidia_tao_tf1.cv.unet.proto.evaluation_config_pb2'
    # @@protoc_insertion_point(class_scope:EvaluationConfig.MinimumDetectionGroundTruthOverlapEntry)
    ))
  ,
  EvaluationBoxConfig = _reflection.GeneratedProtocolMessageType('EvaluationBoxConfig', (_message.Message,), dict(
    DESCRIPTOR = _EVALUATIONCONFIG_EVALUATIONBOXCONFIG,
    __module__ = 'nvidia_tao_tf1.cv.unet.proto.evaluation_config_pb2'
    # @@protoc_insertion_point(class_scope:EvaluationConfig.EvaluationBoxConfig)
    ))
  ,
  EvaluationBoxConfigEntry = _reflection.GeneratedProtocolMessageType('EvaluationBoxConfigEntry', (_message.Message,), dict(
    DESCRIPTOR = _EVALUATIONCONFIG_EVALUATIONBOXCONFIGENTRY,
    __module__ = 'nvidia_tao_tf1.cv.unet.proto.evaluation_config_pb2'
    # @@protoc_insertion_point(class_scope:EvaluationConfig.EvaluationBoxConfigEntry)
    ))
  ,
  DESCRIPTOR = _EVALUATIONCONFIG,
  __module__ = 'nvidia_tao_tf1.cv.unet.proto.evaluation_config_pb2'
  # @@protoc_insertion_point(class_scope:EvaluationConfig)
  ))
# Register the generated classes so parsers can resolve them by full name.
_sym_db.RegisterMessage(EvaluationConfig)
_sym_db.RegisterMessage(EvaluationConfig.MinimumDetectionGroundTruthOverlapEntry)
_sym_db.RegisterMessage(EvaluationConfig.EvaluationBoxConfig)
_sym_db.RegisterMessage(EvaluationConfig.EvaluationBoxConfigEntry)
# Clear Python-side options on map-entry descriptors (protoc boilerplate).
_EVALUATIONCONFIG_MINIMUMDETECTIONGROUNDTRUTHOVERLAPENTRY._options = None
_EVALUATIONCONFIG_EVALUATIONBOXCONFIGENTRY._options = None
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/proto/evaluation_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/unet/proto/model_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/unet/proto/model_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n/nvidia_tao_tf1/cv/unet/proto/model_config.proto\"\xc1\x06\n\x0bModelConfig\x12\x1d\n\x15pretrained_model_file\x18\x01 \x01(\t\x12 \n\x18\x66reeze_pretrained_layers\x18\x02 \x01(\x08\x12\'\n\x1f\x61llow_loaded_model_modification\x18\x03 \x01(\x08\x12\x12\n\nnum_layers\x18\x04 \x01(\x05\x12\x13\n\x0buse_pooling\x18\x05 \x01(\x08\x12\x16\n\x0euse_batch_norm\x18\x06 \x01(\x08\x12\x13\n\x0bremove_head\x18# \x01(\x08\x12\x12\n\nbyom_model\x18\x1f \x01(\t\x12\x14\n\x0c\x64ropout_rate\x18\x07 \x01(\x02\x12\x12\n\nactivation\x18\x15 \x01(\t\x12:\n\x12training_precision\x18\n \x01(\x0b\x32\x1e.ModelConfig.TrainingPrecision\x12\x11\n\tfreeze_bn\x18\x0b \x01(\x08\x12\x15\n\rfreeze_blocks\x18\x0c \x03(\x02\x12\x0c\n\x04\x61rch\x18\r \x01(\t\x12\x12\n\nload_graph\x18\x0e \x01(\x08\x12\x17\n\x0f\x61ll_projections\x18\x0f \x01(\x08\x12\x12\n\nenable_qat\x18\x1d \x01(\x08\x12\x1a\n\x12model_input_height\x18\x10 \x01(\x05\x12\x19\n\x11model_input_width\x18\x11 \x01(\x05\x12\x1c\n\x14model_input_channels\x18\x13 \x01(\x05\x12\x19\n\x11pruned_model_path\x18\x14 \x01(\t\x12\x33\n\x0binitializer\x18\x17 \x01(\x0e\x32\x1e.ModelConfig.KernelInitializer\x1a\x91\x01\n\x11TrainingPrecision\x12\x44\n\x0e\x62\x61\x63kend_floatx\x18\x01 \x01(\x0e\x32,.ModelConfig.TrainingPrecision.BackendFloatx\"6\n\rBackendFloatx\x12\x0b\n\x07INVALID\x10\x00\x12\x0b\n\x07\x46LOAT16\x10\x01\x12\x0b\n\x07\x46LOAT32\x10\x02\"F\n\x11KernelInitializer\x12\x12\n\x0eGLOROT_UNIFORM\x10\x00\x12\r\n\tHE_NORMAL\x10\x01\x12\x0e\n\nHE_UNIFORM\x10\x02\x62\x06proto3')
)
_MODELCONFIG_TRAININGPRECISION_BACKENDFLOATX = _descriptor.EnumDescriptor(
name='BackendFloatx',
full_name='ModelConfig.TrainingPrecision.BackendFloatx',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='INVALID', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FLOAT16', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FLOAT32', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=759,
serialized_end=813,
)
_sym_db.RegisterEnumDescriptor(_MODELCONFIG_TRAININGPRECISION_BACKENDFLOATX)
_MODELCONFIG_KERNELINITIALIZER = _descriptor.EnumDescriptor(
name='KernelInitializer',
full_name='ModelConfig.KernelInitializer',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='GLOROT_UNIFORM', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='HE_NORMAL', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='HE_UNIFORM', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=815,
serialized_end=885,
)
_sym_db.RegisterEnumDescriptor(_MODELCONFIG_KERNELINITIALIZER)
_MODELCONFIG_TRAININGPRECISION = _descriptor.Descriptor(
name='TrainingPrecision',
full_name='ModelConfig.TrainingPrecision',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='backend_floatx', full_name='ModelConfig.TrainingPrecision.backend_floatx', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_MODELCONFIG_TRAININGPRECISION_BACKENDFLOATX,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=668,
serialized_end=813,
)
_MODELCONFIG = _descriptor.Descriptor(
name='ModelConfig',
full_name='ModelConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pretrained_model_file', full_name='ModelConfig.pretrained_model_file', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freeze_pretrained_layers', full_name='ModelConfig.freeze_pretrained_layers', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='allow_loaded_model_modification', full_name='ModelConfig.allow_loaded_model_modification', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_layers', full_name='ModelConfig.num_layers', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='use_pooling', full_name='ModelConfig.use_pooling', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='use_batch_norm', full_name='ModelConfig.use_batch_norm', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='remove_head', full_name='ModelConfig.remove_head', index=6,
number=35, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='byom_model', full_name='ModelConfig.byom_model', index=7,
number=31, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dropout_rate', full_name='ModelConfig.dropout_rate', index=8,
number=7, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='activation', full_name='ModelConfig.activation', index=9,
number=21, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='training_precision', full_name='ModelConfig.training_precision', index=10,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freeze_bn', full_name='ModelConfig.freeze_bn', index=11,
number=11, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freeze_blocks', full_name='ModelConfig.freeze_blocks', index=12,
number=12, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='arch', full_name='ModelConfig.arch', index=13,
number=13, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='load_graph', full_name='ModelConfig.load_graph', index=14,
number=14, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='all_projections', full_name='ModelConfig.all_projections', index=15,
number=15, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_qat', full_name='ModelConfig.enable_qat', index=16,
number=29, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='model_input_height', full_name='ModelConfig.model_input_height', index=17,
number=16, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='model_input_width', full_name='ModelConfig.model_input_width', index=18,
number=17, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='model_input_channels', full_name='ModelConfig.model_input_channels', index=19,
number=19, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pruned_model_path', full_name='ModelConfig.pruned_model_path', index=20,
number=20, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='initializer', full_name='ModelConfig.initializer', index=21,
number=23, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_MODELCONFIG_TRAININGPRECISION, ],
enum_types=[
_MODELCONFIG_KERNELINITIALIZER,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=52,
serialized_end=885,
)
_MODELCONFIG_TRAININGPRECISION.fields_by_name['backend_floatx'].enum_type = _MODELCONFIG_TRAININGPRECISION_BACKENDFLOATX
_MODELCONFIG_TRAININGPRECISION.containing_type = _MODELCONFIG
_MODELCONFIG_TRAININGPRECISION_BACKENDFLOATX.containing_type = _MODELCONFIG_TRAININGPRECISION
_MODELCONFIG.fields_by_name['training_precision'].message_type = _MODELCONFIG_TRAININGPRECISION
_MODELCONFIG.fields_by_name['initializer'].enum_type = _MODELCONFIG_KERNELINITIALIZER
_MODELCONFIG_KERNELINITIALIZER.containing_type = _MODELCONFIG
DESCRIPTOR.message_types_by_name['ModelConfig'] = _MODELCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ModelConfig = _reflection.GeneratedProtocolMessageType('ModelConfig', (_message.Message,), dict(
TrainingPrecision = _reflection.GeneratedProtocolMessageType('TrainingPrecision', (_message.Message,), dict(
DESCRIPTOR = _MODELCONFIG_TRAININGPRECISION,
__module__ = 'nvidia_tao_tf1.cv.unet.proto.model_config_pb2'
# @@protoc_insertion_point(class_scope:ModelConfig.TrainingPrecision)
))
,
DESCRIPTOR = _MODELCONFIG,
__module__ = 'nvidia_tao_tf1.cv.unet.proto.model_config_pb2'
# @@protoc_insertion_point(class_scope:ModelConfig)
))
_sym_db.RegisterMessage(ModelConfig)
_sym_db.RegisterMessage(ModelConfig.TrainingPrecision)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/proto/model_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/unet/proto/dataset_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_tf1.cv.unet.proto import data_class_config_pb2 as nvidia__tao__tf1_dot_cv_dot_unet_dot_proto_dot_data__class__config__pb2
from nvidia_tao_tf1.cv.unet.proto import augmentation_config_pb2 as nvidia__tao__tf1_dot_cv_dot_unet_dot_proto_dot_augmentation__config__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/unet/proto/dataset_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n1nvidia_tao_tf1/cv/unet/proto/dataset_config.proto\x1a\x34nvidia_tao_tf1/cv/unet/proto/data_class_config.proto\x1a\x36nvidia_tao_tf1/cv/unet/proto/augmentation_config.proto\"4\n\nDataSource\x12\x12\n\nimage_path\x18\x01 \x01(\t\x12\x12\n\nmasks_path\x18\x02 \x01(\t\"3\n\x0fTrainDataSource\x12 \n\x0b\x64\x61ta_source\x18\x01 \x03(\x0b\x32\x0b.DataSource\"1\n\rValDataSource\x12 \n\x0b\x64\x61ta_source\x18\x01 \x03(\x0b\x32\x0b.DataSource\"2\n\x0eTestDataSource\x12 \n\x0b\x64\x61ta_source\x18\x01 \x03(\x0b\x32\x0b.DataSource\"\xb3\x04\n\rDatasetConfig\x12\x0f\n\x07\x61ugment\x18\x03 \x01(\x08\x12\x13\n\x0b\x66ilter_data\x18\x1f \x01(\x08\x12\x0f\n\x07\x64\x61taset\x18\n \x01(\t\x12\x12\n\ndataloader\x18\x14 \x01(\t\x12\x12\n\npreprocess\x18\x19 \x01(\t\x12\x16\n\x0eresize_padding\x18\x1d \x01(\x08\x12\x15\n\rresize_method\x18\x1e \x01(\t\x12\x18\n\x10input_image_type\x18\x0b \x01(\t\x12,\n\x12train_data_sources\x18\x01 \x01(\x0b\x32\x10.TrainDataSource\x12(\n\x10val_data_sources\x18\x02 \x01(\x0b\x32\x0e.ValDataSource\x12*\n\x11test_data_sources\x18\x04 \x01(\x0b\x32\x0f.TestDataSource\x12+\n\x11\x64\x61ta_class_config\x18\x12 \x01(\x0b\x32\x10.DataClassConfig\x12\x30\n\x13\x61ugmentation_config\x18\x1c \x01(\x0b\x32\x13.AugmentationConfig\x12\x19\n\x11train_images_path\x18\x0c \x01(\t\x12\x18\n\x10train_masks_path\x18\r \x01(\t\x12\x17\n\x0fval_images_path\x18\x0e \x01(\t\x12\x16\n\x0eval_masks_path\x18\x0f \x01(\t\x12\x18\n\x10test_images_path\x18\x10 \x01(\t\x12\x17\n\x0ftest_masks_path\x18\x11 \x01(\tb\x06proto3')
,
dependencies=[nvidia__tao__tf1_dot_cv_dot_unet_dot_proto_dot_data__class__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_unet_dot_proto_dot_augmentation__config__pb2.DESCRIPTOR,])
_DATASOURCE = _descriptor.Descriptor(
name='DataSource',
full_name='DataSource',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='image_path', full_name='DataSource.image_path', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='masks_path', full_name='DataSource.masks_path', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=163,
serialized_end=215,
)
_TRAINDATASOURCE = _descriptor.Descriptor(
name='TrainDataSource',
full_name='TrainDataSource',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='data_source', full_name='TrainDataSource.data_source', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=217,
serialized_end=268,
)
_VALDATASOURCE = _descriptor.Descriptor(
name='ValDataSource',
full_name='ValDataSource',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='data_source', full_name='ValDataSource.data_source', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=270,
serialized_end=319,
)
_TESTDATASOURCE = _descriptor.Descriptor(
name='TestDataSource',
full_name='TestDataSource',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='data_source', full_name='TestDataSource.data_source', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=321,
serialized_end=371,
)
_DATASETCONFIG = _descriptor.Descriptor(
name='DatasetConfig',
full_name='DatasetConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='augment', full_name='DatasetConfig.augment', index=0,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='filter_data', full_name='DatasetConfig.filter_data', index=1,
number=31, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dataset', full_name='DatasetConfig.dataset', index=2,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dataloader', full_name='DatasetConfig.dataloader', index=3,
number=20, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='preprocess', full_name='DatasetConfig.preprocess', index=4,
number=25, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resize_padding', full_name='DatasetConfig.resize_padding', index=5,
number=29, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resize_method', full_name='DatasetConfig.resize_method', index=6,
number=30, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='input_image_type', full_name='DatasetConfig.input_image_type', index=7,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='train_data_sources', full_name='DatasetConfig.train_data_sources', index=8,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='val_data_sources', full_name='DatasetConfig.val_data_sources', index=9,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='test_data_sources', full_name='DatasetConfig.test_data_sources', index=10,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data_class_config', full_name='DatasetConfig.data_class_config', index=11,
number=18, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='augmentation_config', full_name='DatasetConfig.augmentation_config', index=12,
number=28, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='train_images_path', full_name='DatasetConfig.train_images_path', index=13,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='train_masks_path', full_name='DatasetConfig.train_masks_path', index=14,
number=13, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='val_images_path', full_name='DatasetConfig.val_images_path', index=15,
number=14, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='val_masks_path', full_name='DatasetConfig.val_masks_path', index=16,
number=15, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='test_images_path', full_name='DatasetConfig.test_images_path', index=17,
number=16, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='test_masks_path', full_name='DatasetConfig.test_masks_path', index=18,
number=17, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=374,
serialized_end=937,
)
_TRAINDATASOURCE.fields_by_name['data_source'].message_type = _DATASOURCE
_VALDATASOURCE.fields_by_name['data_source'].message_type = _DATASOURCE
_TESTDATASOURCE.fields_by_name['data_source'].message_type = _DATASOURCE
_DATASETCONFIG.fields_by_name['train_data_sources'].message_type = _TRAINDATASOURCE
_DATASETCONFIG.fields_by_name['val_data_sources'].message_type = _VALDATASOURCE
_DATASETCONFIG.fields_by_name['test_data_sources'].message_type = _TESTDATASOURCE
_DATASETCONFIG.fields_by_name['data_class_config'].message_type = nvidia__tao__tf1_dot_cv_dot_unet_dot_proto_dot_data__class__config__pb2._DATACLASSCONFIG
_DATASETCONFIG.fields_by_name['augmentation_config'].message_type = nvidia__tao__tf1_dot_cv_dot_unet_dot_proto_dot_augmentation__config__pb2._AUGMENTATIONCONFIG
DESCRIPTOR.message_types_by_name['DataSource'] = _DATASOURCE
DESCRIPTOR.message_types_by_name['TrainDataSource'] = _TRAINDATASOURCE
DESCRIPTOR.message_types_by_name['ValDataSource'] = _VALDATASOURCE
DESCRIPTOR.message_types_by_name['TestDataSource'] = _TESTDATASOURCE
DESCRIPTOR.message_types_by_name['DatasetConfig'] = _DATASETCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DataSource = _reflection.GeneratedProtocolMessageType('DataSource', (_message.Message,), dict(
DESCRIPTOR = _DATASOURCE,
__module__ = 'nvidia_tao_tf1.cv.unet.proto.dataset_config_pb2'
# @@protoc_insertion_point(class_scope:DataSource)
))
_sym_db.RegisterMessage(DataSource)
TrainDataSource = _reflection.GeneratedProtocolMessageType('TrainDataSource', (_message.Message,), dict(
DESCRIPTOR = _TRAINDATASOURCE,
__module__ = 'nvidia_tao_tf1.cv.unet.proto.dataset_config_pb2'
# @@protoc_insertion_point(class_scope:TrainDataSource)
))
_sym_db.RegisterMessage(TrainDataSource)
ValDataSource = _reflection.GeneratedProtocolMessageType('ValDataSource', (_message.Message,), dict(
DESCRIPTOR = _VALDATASOURCE,
__module__ = 'nvidia_tao_tf1.cv.unet.proto.dataset_config_pb2'
# @@protoc_insertion_point(class_scope:ValDataSource)
))
_sym_db.RegisterMessage(ValDataSource)
TestDataSource = _reflection.GeneratedProtocolMessageType('TestDataSource', (_message.Message,), dict(
DESCRIPTOR = _TESTDATASOURCE,
__module__ = 'nvidia_tao_tf1.cv.unet.proto.dataset_config_pb2'
# @@protoc_insertion_point(class_scope:TestDataSource)
))
_sym_db.RegisterMessage(TestDataSource)
DatasetConfig = _reflection.GeneratedProtocolMessageType('DatasetConfig', (_message.Message,), dict(
DESCRIPTOR = _DATASETCONFIG,
__module__ = 'nvidia_tao_tf1.cv.unet.proto.dataset_config_pb2'
# @@protoc_insertion_point(class_scope:DatasetConfig)
))
_sym_db.RegisterMessage(DatasetConfig)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/proto/dataset_config_pb2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Dataset converter script."""
import os
import shutil
import tempfile
import pytest
@pytest.mark.skipif(os.getenv("RUN_ON_CI", "0") == "1", reason="Cannot be run on CI")
@pytest.mark.script_launch_mode('subprocess')
def test_dataset_convert(script_runner):
    """Run the COCO-to-VOC dataset_convert script and verify the mask count.

    Launches dataset_convert.py as a subprocess on a small bundled COCO
    annotation file and asserts that exactly ``num_images`` PNG masks are
    written to the results directory. The results directory is removed
    whether or not the assertions pass.
    """
    file_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    coco_json_file = os.path.join(file_path, 'tests/test_data/instances_val2017.json')
    num_images = 5
    # mkdtemp() always creates the directory, so no isdir/makedirs check is needed.
    results_dir = tempfile.mkdtemp()
    env = os.environ.copy()
    script = "nvidia_tao_tf1/cv/unet/scripts/dataset_convert.py"
    args = ['-f', coco_json_file,
            '-r', results_dir,
            '-n', str(num_images)]
    ret = script_runner.run(script, env=env, *args)
    try:
        assert ret.success, "The dataset convert failed."
        converted = [f for f in os.listdir(results_dir) if f.endswith(".png")]
        assert len(converted) == num_images, \
            "All the images were not converted to VOC."
    except AssertionError as exc:
        # Surface the subprocess output so failures are diagnosable, while
        # chaining the original assertion for a full traceback.
        raise AssertionError(ret.stdout + ret.stderr) from exc
    finally:
        # Always clean up the temp directory, even on failure.
        shutil.rmtree(results_dir)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/tests/test_dataset_convert.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test custom Unet Custom loss."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from nvidia_tao_tf1.cv.unet.utils.model_fn import dice_coef
def test_loss():
    """Check dice_coef against a hand-computed reference value.

    Builds a small one-hot ground truth and a soft prediction, evaluates the
    mean dice loss (1 - dice coefficient) in a TF1 session, and compares the
    result with a precomputed expected value.
    """
    y_true = np.array([[0, 0, 1, 0], [0, 0, 1, 0], [0, 0, 1., 0.]])
    y_pred = np.array([[0, 0, 0.9, 0], [0, 0, 0.1, 0], [1, 1, 0.1, 1.]])
    dice_loss = tf.reduce_mean(1 - dice_coef(tf.constant(y_pred), tf.constant(y_true)),
                               name='dice_loss')
    with tf.Session() as sess:
        # Compare with a tolerance rather than exact float equality: the loss
        # is the product of floating-point reductions, and bitwise equality is
        # fragile across platforms and library versions.
        assert np.isclose(abs(sess.run(dice_loss)), 0.5858765827258523)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/tests/test_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test train script."""
import os
import shutil
import pytest
import tensorflow as tf
@pytest.fixture
def _spec_file():
"""Get UNet default file."""
file_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
default_spec_path_1 = os.path.join(file_path, '../experiment_specs/default1.txt')
default_spec_path_2 = os.path.join(file_path, '../experiment_specs/default2.txt')
spec_files = [default_spec_path_1, default_spec_path_2]
return spec_files
@pytest.mark.skipif(os.getenv("RUN_ON_CI", "0") == "1", reason="Cannot be run on CI")
@pytest.mark.script_launch_mode('subprocess')
def test_train_script(tmpdir, script_runner):
    """Launch the UNet train script once per default experiment spec.

    Best-effort test: a failed run only prints a notice (the local data
    path may not be available), and the results directory is always
    cleaned up afterwards.
    """
    module_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    # The pytest ``tmpdir`` fixture is deliberately replaced with a local
    # results directory next to the test module.
    tmpdir = os.path.join(module_dir, "tmp_results")
    script = 'nvidia_tao_tf1/cv/unet/scripts/train.py'
    env = os.environ.copy()
    for spec_path in _spec_file():
        cli_args = ['-k', 'nvidia_tlt',
                    '-e', spec_path,
                    '-r', tmpdir]
        tf.keras.backend.clear_session()
        result = script_runner.run(script, env=env, *cli_args)
        try:
            assert result.success
        except AssertionError:
            print("Local path is not ready.")
        finally:
            if os.path.exists(tmpdir):
                shutil.rmtree(tmpdir)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/tests/test_train.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Export script."""
import json
import os
import tempfile
import pytest
import shutil
from nvidia_tao_tf1.cv.unet.distribution import distribution
from nvidia_tao_tf1.cv.unet.model.build_unet_model import build_model, select_model_proto
from nvidia_tao_tf1.cv.unet.model.utilities import build_target_class_list, \
get_train_class_mapping, initialize_params
from nvidia_tao_tf1.cv.unet.model.utilities import get_pretrained_ckpt, \
get_pretrained_model_path, initialize, update_model_params
from nvidia_tao_tf1.cv.unet.spec_handler.spec_loader import load_experiment_spec
data_types = [("fp32"), ("int8")]
@pytest.mark.skipif(os.getenv("RUN_ON_CI", "0") == "1", reason="Cannot be run on CI")
@pytest.mark.script_launch_mode('subprocess')
@pytest.mark.parametrize("data_type",
                         data_types)
def test_export(script_runner, data_type):
    """End-to-end test of the UNet export script.

    Builds a UNet model from the default spec, restores the pretrained
    checkpoint, then launches ``scripts/export.py`` in a subprocess and
    asserts that an ONNX file is produced. Temporary artifacts (checkpoint
    directory, ONNX file, calibration files) are cleaned up afterwards.

    Args:
        script_runner: pytest script-runner fixture used to launch the
            export script as a subprocess.
        data_type (str): export precision, "fp32" or "int8".
    """
    file_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    spec_path = os.path.join(file_path, 'experiment_specs/default1.txt')
    experiment_spec = load_experiment_spec(spec_path, merge_from_default=False)
    key = "nvidia-tao"
    # Initialize the environment
    initialize(experiment_spec)
    # Initialize Params
    params = initialize_params(experiment_spec)
    results_dir = tempfile.mkdtemp()
    # Only the master process creates the results dir and writes the
    # class-id mapping, mirroring what the training script does.
    if distribution.get_distributor().is_master():
        if not os.path.isdir(results_dir):
            os.makedirs(results_dir)
    target_classes = build_target_class_list(
        experiment_spec.dataset_config.data_class_config)
    target_classes_train_mapping = get_train_class_mapping(target_classes)
    with open(os.path.join(results_dir, 'target_class_id_mapping.json'), 'w') as fp:
        json.dump(target_classes_train_mapping, fp)
    # Build run config
    model_config = select_model_proto(experiment_spec)
    unet_model = build_model(m_config=model_config,
                             target_class_names=target_classes,
                             seed=params["seed"])
    pretrained_model_file = None
    custom_objs = None
    # Resolve the checkpoint to export; a None input falls back to the
    # default pretrained model location.
    input_model_file_name = get_pretrained_model_path(pretrained_model_file)
    pre_trained_weights, model_json, _ = \
        get_pretrained_ckpt(input_model_file_name, key, custom_objs=custom_objs)
    ckpt_dir = os.path.split(os.path.abspath(pre_trained_weights))[0]
    img_height, img_width, img_channels = experiment_spec.model_config.model_input_height, \
        experiment_spec.model_config.model_input_width, \
        experiment_spec.model_config.model_input_channels
    params = update_model_params(params=params, unet_model=unet_model,
                                 experiment_spec=experiment_spec, key=key,
                                 results_dir=results_dir,
                                 target_classes=target_classes,
                                 model_json=model_json,
                                 custom_objs=custom_objs
                                 )
    # Construct the graph (NCHW input shape) so the checkpoint can be exported.
    unet_model.construct_model(
        input_shape=(img_channels, img_height, img_width),
        pretrained_weights_file=params.pretrained_weights_file,
        enc_key=key, model_json=params.model_json,
        features=None)
    ckzip_file = ckpt_dir
    tmp_onnx_model = os.path.join(file_path, "tmp.onnx")
    # Remove any stale ONNX file from a previous run so it cannot mask failures.
    if os.path.exists(tmp_onnx_model):
        os.remove(tmp_onnx_model)
    env = os.environ.copy()
    script = "nvidia_tao_tf1/cv/unet/scripts/export.py"
    if data_type == "fp32":
        args = ['-m', ckzip_file,
                '-k', key,
                '--experiment_spec', spec_path,
                '-o', tmp_onnx_model]
        ret = script_runner.run(script, env=env, *args)
        # before abort, remove the created temp files when exception raises
        # NOTE(review): there is no ``except`` here -- an assertion failure
        # propagates after the ``finally`` cleanup runs.
        try:
            assert ret.success
            assert os.path.isfile(tmp_onnx_model)
            if os.path.exists(tmp_onnx_model):
                os.remove(tmp_onnx_model)
        finally:
            # if the script runner failed, the tmp_onnx_model may not be created at all
            if os.path.exists(tmp_onnx_model):
                os.remove(tmp_onnx_model)
            shutil.rmtree(ckzip_file)
    # Temp files for int8 calibration data and cache; only the paths are
    # needed, so the files are closed and removed right away.
    os_handle, tmp_data_file = tempfile.mkstemp()
    os.close(os_handle)
    os.remove(tmp_data_file)
    os_handle, tmp_cache_file = tempfile.mkstemp()
    os.close(os_handle)
    os.remove(tmp_cache_file)
    if data_type == "int8":
        args = ['-m', ckzip_file,
                '-k', key,
                '--experiment_spec', spec_path,
                '-o', tmp_onnx_model,
                '--data_type', 'int8',
                '--cal_cache_file', tmp_cache_file,
                "--cal_data_file", tmp_data_file
                ]
        ret = script_runner.run(script, env=env, *args)
        # before abort, remove the created temp files when exception raises
        try:
            assert ret.success
            assert os.path.isfile(tmp_onnx_model), (
                f"Output model wasn't generated: {tmp_onnx_model}"
            )
        except AssertionError:
            # Surface the subprocess output in the failure message.
            raise AssertionError(ret.stdout + ret.stderr)
        finally:
            # if the script runner failed, the tmp_onnx_model may not be created at all
            if os.path.exists(tmp_onnx_model):
                os.remove(tmp_onnx_model)
            shutil.rmtree(ckzip_file)
    if os.path.exists(ckzip_file):
        shutil.rmtree(ckzip_file)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/tests/test_export.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Process-distribution functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.cv.unet.distribution.distribution import Distributor, HorovodDistributor, hvd
from nvidia_tao_tf1.cv.unet.distribution.distribution import get_distributor, set_distributor
__all__ = (
"Distributor",
"hvd",
"HorovodDistributor",
"set_distributor",
"get_distributor",
)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/distribution/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Process-distribution functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import tensorflow as tf
try:
from functools import lru_cache
except ImportError:
from functools32 import lru_cache
if os.environ.get("TF_KERAS"):
from tensorflow import keras # pylint: disable=C0412
else:
import keras
class Distributor(object):
    """Base distributor object.

    Behaves exactly like a single, undistributed process: there is one
    process of size 1, the only rank is 0 (which is also the master), and
    every distribution hook (optimizer wrapping, allreduce, broadcasting,
    gradient tapes) is a passthrough. Subclasses override these methods to
    provide actual multi-process behavior.
    """

    def __init__(self, master_rank=0, per_process_gpu_memory_fraction=None):
        """Create the distributor.

        Args:
            master_rank (int): specifies the intended rank of the master;
                must be smaller than ``self.size()``.
            per_process_gpu_memory_fraction (float): fraction of GPU memory
                to reserve for TensorFlow. Ignored if unset.

        Raises:
            ValueError: if ``master_rank`` is not a valid rank.
        """
        if master_rank >= self.size():
            raise ValueError(
                "Requested a master rank of {}, which should be smaller than "
                "the distribution size ({}).".format(master_rank, self.size())
            )
        self._master_rank = master_rank
        self._per_process_gpu_memory_fraction = per_process_gpu_memory_fraction

    def get_config(self):
        """Build and return a tf session config pinned to this process's GPU."""
        session_config = tf.compat.v1.ConfigProto()
        session_config.allow_soft_placement = True
        gpu_options = session_config.gpu_options
        # Each process only sees the single GPU selected by its local rank.
        gpu_options.visible_device_list = str(self.local_rank())
        gpu_options.allow_growth = True
        if self._per_process_gpu_memory_fraction is not None:
            gpu_options.per_process_gpu_memory_fraction = (
                self._per_process_gpu_memory_fraction
            )
        return session_config

    def size(self):
        """Return the total number of processes (always 1 for the base class)."""
        return 1

    def local_size(self):
        """Return the number of processes on this node (always 1 here)."""
        return 1

    def rank(self):
        """Return the global rank/index of this process (always 0 here)."""
        return 0

    def local_rank(self):
        """Return the rank within the current node (always 0 here)."""
        return 0

    def is_multi_node(self):
        """Return True when processes span more than one node."""
        return self.local_size() != self.size()

    def is_master(self):
        """Return True when the current process is the master process."""
        return self.rank() == self._master_rank

    def is_distributed(self):
        """Return True when more than one process is running."""
        return self.size() > 1

    def distributed_seed(self, seed):
        """Return a per-rank perturbation of ``seed``.

        Args:
            seed (int): the current fixed seed, or None.

        Returns:
            ``None`` if ``seed`` is ``None``, otherwise ``seed`` shifted by
            this process's rank so each process seeds differently.
        """
        if seed is None:
            return None
        return seed + self.rank()

    def broadcast_global_variables(self):
        """Broadcast variables from master rank to all others (no-op here)."""
        pass

    def distribute_optimizer(self, optimizer):
        """Return the optimizer unchanged (no distribution in the base class)."""
        return optimizer

    def allreduce(self, value):
        """Sum ``value`` across GPUs; identity for a single process."""
        return value

    def shutdown(self):
        """Shut the distribution strategy down by exiting the process."""
        sys.exit("A request has been made to shutdown the distribution strategy.")

    def distributed_gradient_tape(self, tape):
        """Return ``tape`` wrapped for distribution (identity here).

        Args:
            tape (tf.GradientTape): recorded operations of automatic
                differentiation.
        """
        return tape

    def broadcast_variables(self, variables, root_rank=0):
        """Broadcast ``variables`` from ``root_rank`` (no-op here).

        Args:
            variables (tf.Variable): variables that need to be broadcast.
            root_rank (int): rank the variables are broadcast from.
        """
        pass
@lru_cache()
def hvd():
    """Import horovod.tensorflow on first use and return the cached module."""
    import horovod.tensorflow as horovod_tf
    return horovod_tf
class HorovodDistributor(Distributor):
    """Horovod distributor object.

    This object wraps several horovod functions and provides some utilities. Notice that the
    horovod module is lazily loaded so that it is only a dependency to maglev when you want to
    use the HorovodDistributor object.

    The documentation of horovod is hosted on `<https://github.com/uber/horovod>`_. Horovod's
    core principles are based on
    `MPI <https://github.com/uber/horovod/blob/master/docs/concepts.md>`_.

    This distributor parallelizes your training script by using custom Tensorflow operations
    and leveraging MPI. The parallelization of your script is done through launching your script
    using ``MPI``, for example using OpenMPI's `mpirun`. So instead of::

        python train.py

    One would launch 4 local processes using::

        mpirun -np 4 python train.py

    Where ``train.py`` should use the current distributor and its methods. Horovod will then
    map each of the 4 local processes on different GPUs. If you do not want to use the horovod
    distributor, but want to have your code distributor-enabled, you can just use the base
    :any:`Distributor` class, that is undistributed by default and acts as
    a passthrough.
    """

    def __init__(self, **kwargs):
        """__init__ method.

        Initializes horovod, and pins GPUs to the current session. This initialization should
        happen before any of the other distribution functions are imported, used or called.

        Args:
            **kwargs: arbitrary keyword arguments forwarded to :any:`Distributor`.
        """
        # Horovod must be initialized before any other hvd call (size/rank etc.),
        # including the size() check in the base-class constructor.
        hvd().init()
        super(HorovodDistributor, self).__init__(**kwargs)
        if not tf.executing_eagerly():
            # Pin GPU to be used to process local rank (one GPU per process)
            session = tf.compat.v1.Session(config=self.get_config())
            keras.backend.set_session(session)
        else:
            # Eager mode: pin this process to the GPU matching its local rank.
            gpus = tf.config.experimental.list_physical_devices("GPU")
            if self.local_rank() >= len(gpus):
                raise ValueError(
                    "Requesting a local rank {}, which should be"
                    "smaller than the gpu count {}.".format(
                        self.local_rank(), len(gpus)
                    )
                )
            gpu = gpus[self.local_rank()]
            tf.config.experimental.set_memory_growth(gpu, True)
            tf.config.experimental.set_visible_devices(gpu, "GPU")

    def size(self):
        """Get the size.

        Returns:
            Total amount of distributed processes (int).
        """
        return hvd().size()

    def local_size(self):
        """Get the local size.

        NOTE: 'local' is defined as 'inside the current node'.

        Returns:
            Total amount of distributed processes locally (int).
        """
        return hvd().local_size()

    def rank(self):
        """Get the rank.

        The rank can be considered the current global unique rank (or index), where all nodes
        and all processes within a node are considered.

        Returns:
            Rank (int)
        """
        return hvd().rank()

    def local_rank(self):
        """Get the local rank.

        NOTE: 'local' is defined as 'inside the current node'.

        Returns:
            Local rank (int).
        """
        return hvd().local_rank()

    def broadcast_global_variables(self):
        """Broadcast variables from master rank to all other processes.

        This function should be called after all variables are created, but before evaluating
        any operations that require distribution, like allreduce or using the distributed
        optimizer.
        """
        broadcast_ops = hvd().broadcast_global_variables(self._master_rank)
        # Run the broadcast ops in the current keras session.
        keras.backend.get_session().run(broadcast_ops)

    def broadcast_global_variables_hook(self):
        """Broadcast variables from master rank to all other processes.

        BroadcastGlobalVariablesHook broadcasts initial variable states from rank 0 to all
        other processes. This is necessary to ensure consistent initialization of all workers
        when training is started with random weights or restored from a checkpoint.

        Returns:
            A instance that inherits from a `tf.estimator.SessionRunHook` object that takes
            care of variable initialization across processes.
        """
        # Note that the class inherits from a lazy horovod import, which is why it is defined
        # inline.
        class _ScopedBroadcastGlobalVariablesHook(hvd().BroadcastGlobalVariablesHook):
            """Class that wraps the global variables broadcast hook into one with op scopes."""

            def begin(self, *args, **kwargs):
                """Call begin by forwarding the begin call within a tf name scope."""
                with tf.compat.v1.name_scope("horovod_broadcast"):
                    super(_ScopedBroadcastGlobalVariablesHook, self).begin(
                        *args, **kwargs
                    )

        return _ScopedBroadcastGlobalVariablesHook(0)

    def distribute_optimizer(self, optimizer):
        """Distribute the input optimizer.

        Args:
            optimizer: a tensorflow optimizer object to be distributed.

        Returns:
            The input optimizer wrapped in an optimizer that takes care of the distribution.
        """
        hvd_optimizer = hvd().DistributedOptimizer(optimizer)
        return hvd_optimizer

    def allreduce(self, value):
        """Allreduce operation that sums value across GPUs.

        Args:
            value: value to be summed.

        Returns:
            Sum of value across all GPUs.
        """
        return hvd().allreduce(value)

    def shutdown(self):
        """Shut horovod down.

        Note that while this does not exit the process, if, later down the line, another
        process sends tensors to be reduced / gathered through horovod, the latter will detect
        that it has been shutdown, and crash as (hopefully) appropriate.
        """
        hvd().shutdown()

    def distributed_gradient_tape(self, tape):
        """Add Horovod Distributed GradientTape for tensorflow eager mode.

        Args:
            tape (tf.GradientTape): Recorded operations of automatic differentiation.

        Returns:
            tape (tf.GradientTape): The input tape wrapped in a tape that takes
                care of the distribution.
        """
        return hvd().DistributedGradientTape(tape)

    def broadcast_variables(self, variables, root_rank=0):
        """Broadcast variables from root_rank to other ranks.

        Args:
            variables (tf.Variable): tensorflow variables that need to be broadcast.
            root_rank (int): From which rank the variables need to be broadcast.
        """
        hvd().broadcast_variables(variables, root_rank=root_rank)
# Define the distributor here so it's static.
_DISTRIBUTOR = Distributor()
def set_distributor(d):
    """Install ``d`` as the process-wide distribution strategy.

    Args:
        d: an instance whose class derives from Distributor to serve as the
            new distribution object.
    """
    global _DISTRIBUTOR  # pylint: disable=W0603
    _DISTRIBUTOR = d
def get_distributor():
    """Return the currently active distribution strategy object."""
    return _DISTRIBUTOR
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/distribution/distribution.py |
# Copyright 2019-2020 NVIDIA Corporation. All rights reserved.
"""UNet TensorRT Evaluation Derived from Inference."""
import os
import numpy as np
from tqdm import tqdm
from nvidia_tao_tf1.cv.unet.utils.inference_trt import Inferencer
class Evaluator(Inferencer):
    """Manages TensorRT objects for model evaluation.

    Reuses the TensorRT inference pipeline from ``Inferencer`` and adds
    label loading plus per-image confusion-matrix computation.
    """

    def __init__(self, *args, **kwargs):
        """Init function. All arguments are forwarded to Inferencer."""
        super(Evaluator, self).__init__(*args, **kwargs)

    def _load_img_label(self, img_filename, mask_filename):
        """load an image and returns images and corresponding label numpy array.

        Args:
            img_filename (str): path to an image
            mask_filename (str): path to mask filename.

        Returns:
            inputs: Pre-processed numpy array
            labels: One hot encoded corresponding labels
        """
        inputs, labels, _ = self.dataset.read_image_and_label_tensors(img_filename,
                                                                      mask_filename)
        inputs, labels, _ = self.dataset.rgb_to_bgr_tf(inputs, labels)
        inputs, labels, _ = self.dataset.cast_img_lbl_dtype_tf(inputs, labels)
        inputs, labels, _ = self.dataset.resize_image_and_label_tf(inputs, labels)
        inputs, labels, _ = self.dataset.normalize_img_tf(inputs, labels)
        inputs, labels, _ = self.dataset.transpose_to_nchw(inputs, labels)
        inputs, labels, _ = self.dataset.prednn_categorize_label(inputs, labels)
        # The dataset transforms above are TF ops; materialize them to numpy.
        inputs = inputs.eval(session=self.session)
        labels = labels.eval(session=self.session)
        inputs = np.array(inputs)
        labels = np.array(labels)
        return inputs, labels

    def _predict_batch(self, inf_inputs, inf_labels):
        '''function to predict a batch and compute conf matrix.'''
        inf_inputs_np = np.array(inf_inputs)
        inf_labels_np = np.array(inf_labels)
        y_pred = self.pred_fn(inf_inputs_np)
        predictions_batch = self.eval_process_fn(y_pred, inf_labels_np)
        return predictions_batch

    def eval_process_fn(self, y_pred, inf_labels_np):
        '''Post process the TRT inference output by reshaping.

        Args:
            y_pred (list): raw TRT output buffers for the batch.
            inf_labels_np (np.ndarray): batch of pre-processed labels.

        Returns:
            list: one {"conf_matrix": ...} dict per image in the batch.
        '''
        predictions_batch = []
        for idx in range(y_pred[0].shape[0]):
            gt = inf_labels_np[idx, ...]
            pred = np.reshape(y_pred[0][idx, ...], (self.dataset.model_output_height,
                                                    self.dataset.model_output_width,
                                                    1))
            if self.activation == "sigmoid":
                # Binary segmentation: threshold the probability map at 0.5.
                pred = np.squeeze(pred, axis=-1)
                gt = np.squeeze(gt, axis=0)
                pred = np.where(pred > 0.5, 1, 0)
            else:
                # Multi-class: collapse the one-hot channel axis to class ids.
                gt = np.argmax(gt, axis=0)
            pred_flatten = pred.flatten()
            gt_flatten = gt.flatten()
            conf_matrix = self.compute_confusion_matrix(gt_flatten, pred_flatten)
            pred_dic = {"conf_matrix": conf_matrix}
            predictions_batch.append(pred_dic)
        return predictions_batch

    def compute_confusion_matrix(self, true, pred):
        '''Sklearn equivalent function that handles GT without 1 class.

        Args:
            true (np.ndarray): flattened ground-truth class indices.
            pred (np.ndarray): flattened predicted class indices.

        Returns:
            np.ndarray: (K, K) matrix where entry [i][j] counts pixels of
            ground-truth class i predicted as class j.
        '''
        true = true.astype(np.int32)
        pred = pred.astype(np.int32)
        K = self.num_conf_mat_classes
        result = np.zeros((K, K))
        # Vectorized scatter-add: identical to the per-pixel loop
        # ``result[true[i]][pred[i]] += 1`` but runs at C speed.
        np.add.at(result, (true, pred), 1)
        return result

    def _evaluate_folder(self, img_names_list, masks_names_list):
        """evaluate in a folder of images.

        Args:
            img_names_list: list of img names
            masks_names_list: list of mask names
        """
        predictions = []
        # Ceiling division so a final partial batch is still processed.
        n_batches = (len(img_names_list) + self.batch_size - 1) // self.batch_size
        for batch_idx in tqdm(range(n_batches)):
            start = batch_idx * self.batch_size
            end = start + self.batch_size
            inf_inputs = []
            inf_labels = []
            for img_path, mask_path in zip(img_names_list[start:end],
                                           masks_names_list[start:end]):
                _, ext = os.path.splitext(img_path)
                if ext not in self.supported_img_format:
                    raise ValueError("Provided image format {} is not supported!".format(ext))
                inf_input, inf_label = self._load_img_label(img_path, mask_path)
                inf_labels.append(inf_label)
                inf_inputs.append(inf_input)
            y_pred_batch = self._predict_batch(inf_inputs, inf_labels)
            predictions += y_pred_batch
        return predictions

    def evaluate(self, image_names_list, masks_names_list):
        """Wrapper function for evaluation."""
        if not image_names_list or not masks_names_list:
            # Fixed message: the original concatenation was missing a space
            # ("should notbe empty").
            raise ValueError("Input images and Input masks should not "
                             "be empty for evaluation!")
        predictions = self._evaluate_folder(image_names_list,
                                            masks_names_list)
        self.session.close()
        return predictions
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/utils/evaluate_trt.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to format the performance and accuracy results."""
import argparse
import os
import numpy as np
from nvidia_tao_tf1.core.utils.path_utils import expand_path
def process_performance_stats(timestamps, batch_size):
    """Print throughput/latency statistics and return their averages.

    Args:
        timestamps (np.ndarray): per-step durations in seconds.
        batch_size (int): number of images processed per step.

    Returns:
        (float, float): mean throughput in img/s and mean latency in ms.
    """
    # Work in milliseconds and drop non-positive (warm-up/invalid) samples.
    latencies_ms = 1000 * timestamps
    latencies_ms = latencies_ms[latencies_ms > 0]
    mean_latency_ms = latencies_ms.mean()
    latency_std = latencies_ms.std()
    sqrt_n = np.sqrt(len(latencies_ms))
    mean_throughput = (1000.0 * batch_size / latencies_ms).mean()
    print('Throughput Avg:', round(mean_throughput, 3), 'img/s')
    print('Latency Avg:', round(mean_latency_ms, 3), 'ms')
    # One-sided normal confidence bounds on the mean latency.
    for label, z_value in (("90%:", 1.645), ("95%:", 1.960), ("99%:", 2.576)):
        print("Latency", label, round(mean_latency_ms + z_value * latency_std / sqrt_n, 3), "ms")
    return float(mean_throughput), float(mean_latency_ms)
def parse_convergence_results(path, environment):
    """Aggregate and print evaluation scores from a run's logfiles.

    Args:
        path (str): directory containing the logfiles.
        environment (str): environment tag a logfile name must contain.

    Raises:
        FileNotFoundError: if no matching logfile is found under ``path``.
    """
    def _extract_score(line, tag):
        # The last log line carries tokens like "eval_dice_score:0.87".
        token = [tok for tok in line.split() if tag in tok][0]
        return float(token.split(":")[1])

    dice_scores = []
    ce_scores = []
    logfiles = []
    if os.path.isdir(expand_path(path)):
        logfiles = [f for f in os.listdir(expand_path(path))
                    if "log" in f and environment in f]
    if not logfiles:
        raise FileNotFoundError("No logfile found at {}".format(path))
    for logfile in logfiles:
        with open(expand_path(f"{path}/{logfile}"), "r") as handle:
            lines = handle.readlines()
        if "eval_dice_score" not in lines[-1]:
            print("Evaluation score not found. The file", logfile, "might be corrupted.")
            continue
        dice_scores.append(_extract_score(lines[-1], "eval_dice_score"))
        ce_scores.append(_extract_score(lines[-1], "eval_ce_loss"))
    if dice_scores:
        print("Evaluation dice score:", sum(dice_scores) / len(dice_scores))
        print("Evaluation cross-entropy loss:", sum(ce_scores) / len(ce_scores))
    else:
        print("All logfiles were corrupted, no loss was obtained.")
if __name__ == '__main__':
    # Command-line entry point: in 'convergence' mode, parse and summarize
    # the scores from a finished run's logfiles; 'benchmark' mode is a no-op
    # here (benchmark stats are produced by process_performance_stats at
    # run time).
    parser = argparse.ArgumentParser(description="UNet-medical-utils")
    parser.add_argument('--exec_mode',
                        choices=['convergence', 'benchmark'],
                        type=str,
                        help="""Which execution mode to run the model into""")
    parser.add_argument('--model_dir',
                        type=str,
                        required=True)
    parser.add_argument('--env',
                        choices=['FP32_1GPU', 'FP32_8GPU', 'TF-AMP_1GPU', 'TF-AMP_8GPU'],
                        type=str,
                        required=True)
    args = parser.parse_args()
    if args.exec_mode == 'convergence':
        parse_convergence_results(path=args.model_dir, environment=args.env)
    elif args.exec_mode == 'benchmark':
        # Nothing to post-process for benchmark runs.
        pass
    print()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/utils/parse_results.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset class encapsulates the data loading."""
import logging
import os
import numpy as np
from PIL import Image
from sklearn.utils import shuffle
import tensorflow as tf
from nvidia_tao_tf1.core.utils.path_utils import expand_path
logger = logging.getLogger(__name__)
UNKNOWN_CLASS = '-1'
class Dataset():
"""Load, separate and prepare the data for training, prediction and evaluation."""
    def __init__(self, batch_size, fold=1, augment=False, gpu_id=0,
                 num_gpus=1, params=None, phase="train", target_classes=None,
                 buffer_size=None, data_options=True, filter_data=False):
        """Instantiate the dataloader.

        Args:
            batch_size (int): Batch size to be used for training.
            fold (int): The fold to be used for benchmarking.
            augment (bool): Holds the parameters for augmentation and
                preprocessing.
            gpu_id (int): The GPU id to be used for training.
            num_gpus (int): Number of GPU's to be used for training.
            params (dict): Dictionary containing the parameters for the
                run_config of the estimator.
            phase (str): train/ infer/ val.
            target_classes (list): list of target class objects.
            buffer_size (int): Buffer size to use as the no. of samples per
                step; defaults to the full dataset size.
            data_options (bool): Data options to vectorize the data loading.
            filter_data (bool): Filter images/ masks that are not present.
        """
        self._batch_size = batch_size
        self._augment = augment
        self.filter_data = filter_data
        self.image_global_index = 0
        self._seed = params.seed
        # Resize/arch settings taken from the experiment spec.
        self.resize_padding = params.resize_padding
        self.resize_method = params.resize_method
        self.backbone = params.experiment_spec.model_config.arch
        self.model_input_height = params.experiment_spec.model_config.model_input_height
        self.model_input_width = params.experiment_spec.model_config.model_input_width
        self.model_input_channels = params.experiment_spec.model_config.model_input_channels
        self.model_arch = params.experiment_spec.model_config.arch
        self.model_output_height, self.model_output_width = self.get_output_dimensions()
        self.input_image_type = params.experiment_spec.dataset_config.input_image_type
        # Setting the default input image type to color
        if not self.input_image_type:
            self.input_image_type = "color"
        self._num_gpus = num_gpus
        self._gpu_id = gpu_id
        self.phase = phase
        self.supported_img_formats = ["png", "jpg", "jpeg", "PNG", "JPG", "JPEG"]
        # Data sources for each phase; mask paths may be absent for inference.
        self.dataset = params.experiment_spec.dataset_config.dataset
        self.train_data_sources = \
            params.experiment_spec.dataset_config.train_data_sources.data_source
        self.val_data_sources = params.experiment_spec.dataset_config.val_data_sources.data_source
        self.test_data_sources = params.experiment_spec.dataset_config.test_data_sources.data_source
        self.train_images_path = params.experiment_spec.dataset_config.train_images_path
        self.train_masks_path = params.experiment_spec.dataset_config.train_masks_path
        self.val_images_path = params.experiment_spec.dataset_config.val_images_path
        self.val_masks_path = params.experiment_spec.dataset_config.val_masks_path
        self.test_images_path = params.experiment_spec.dataset_config.test_images_path
        self.test_image_names = []
        self.test_mask_names = []
        self.image_names_list, self.masks_names_list = self.get_images_masks_lists()
        self.buffer_size = buffer_size if buffer_size else len(self.image_names_list)
        assert(self.buffer_size <= len(self.image_names_list)), \
            "Buffer size should not be more than total dataset size."
        # Decode functions are chosen based on the file extensions on disk.
        self.decode_fn_img, self.decode_fn_label = self.validate_extension()
        self.img_input_height, self.img_input_width, self.img_input_channels = \
            self.get_input_shape()
        self.target_classes = target_classes
        self.lookup_table = None
        self.label_train_dic = self.get_label_train_dic()
        self.num_classes = params.num_classes
        self.use_amp = params.use_amp
        # Default normalization scheme when none is configured.
        self.preprocess = params.experiment_spec.dataset_config.preprocess if \
            params.experiment_spec.dataset_config.preprocess else "min_max_-1_1"
        self.augmentation_params = params.experiment_spec.dataset_config.augmentation_config
        self.data_options = data_options
        print("\nPhase %s: Total %d files." % (self.phase, len(self.image_names_list)))
def validate_extension(self):
"""Function to validate the image/ label extension and computing extension."""
assert(len(self.image_names_list) > 0), \
"Please check images path. The input image list is empty."
img_ext = self.image_names_list[0].split(".")[-1]
assert(img_ext in self.supported_img_formats), "Image Extension is not supported."
decode_fn_img = self.get_decode_fn(img_ext)
decode_fn_label = None
if self.masks_names_list[0]:
label_ext = self.masks_names_list[0].split(".")[-1]
assert(label_ext in self.supported_img_formats), "Label Extension is not supported."
decode_fn_label = self.get_decode_fn(label_ext)
return decode_fn_img, decode_fn_label
def get_input_shape(self):
"""Function to get input shape."""
img_name = self.image_names_list[0]
img_arr = np.array(Image.open(img_name))
input_img_shape = img_arr.shape
img_input_height = input_img_shape[0]
img_input_width = input_img_shape[1]
if len(img_arr.shape) == 2:
img_input_channels = 1
else:
img_input_channels = input_img_shape[2]
if self.model_arch == "vanilla_unet":
if(self.model_input_height != 572 and self.model_input_width != 572):
logging.info("The input height and width for vanilla unet is defaulted to \
572")
self.model_input_height = 572
self.model_input_width = 572
else:
try:
assert(self.model_input_height % 16 == 0 and self.model_input_width % 16 == 0)
except Exception:
raise ValueError("The input height and width for Resnet and VGG backbones \
should be multiple of 16")
return img_input_height, img_input_width, img_input_channels
def extract_image_mask_names_from_datasource(self, data_sources):
"""Function to get the image and mask paths from multiple data sources."""
images_list = []
masks_list = []
for data_source in data_sources:
image_path = data_source.image_path if data_source.image_path else None
mask_path = data_source.masks_path if data_source.masks_path else None
images_list.append(image_path)
# The mask list is None when the masks are not provided
masks_list.append(mask_path)
return images_list, masks_list
def read_data_image_dir(self, images_dir, masks_dir):
"""Function to get the image and mask paths."""
image_names_list = [os.path.join(images_dir, f) for f in os.listdir(images_dir)]
if masks_dir:
masks_names_list = [os.path.join(masks_dir, f) for f in os.listdir(masks_dir)]
else:
# It is inference
masks_names_list = [None for _ in image_names_list]
return image_names_list, masks_names_list
    def get_images_masks_lists(self):
        """Resolve the image and mask path lists for the current phase.

        Data sources (list files) take precedence over plain directories.
        The two lists are shuffled in unison before being returned, so
        index-for-index pairing is preserved.

        Returns:
            tuple(list, list): shuffled, parallel lists of image and mask
            paths (mask entries may be None for test/inference).
        """
        # NOTE(review): an unsupported self.phase value would leave
        # data_sources/images_dir unbound and raise NameError below.
        if self.phase == "train":
            data_sources = self.train_data_sources
            if not data_sources:
                # Set the images/ masks path
                images_dir = self.train_images_path
                masks_dir = self.train_masks_path
        elif self.phase == "val":
            data_sources = self.val_data_sources
            if not data_sources:
                # Set the images/ masks path
                images_dir = self.val_images_path
                masks_dir = self.val_masks_path
        elif self.phase == "test":
            data_sources = self.test_data_sources
            if not data_sources:
                # Set the images/ masks path
                images_dir = self.test_images_path
                # Masks are not required for test
                masks_dir = None
        if data_sources:
            images_list, masks_list = self.extract_image_mask_names_from_datasource(
                data_sources)
            image_names_list, masks_names_list = self.read_data_list_files(images_list,
                                                                           masks_list)
        elif images_dir:
            image_names_list, masks_names_list = self.read_data_image_dir(images_dir,
                                                                          masks_dir)
        # `shuffle` presumably sklearn.utils.shuffle, which shuffles both
        # lists in unison -- TODO confirm against the module imports.
        return shuffle(image_names_list, masks_names_list)
def get_label_train_dic(self):
""""Function to get mapping between class and train id's."""
label_train_dic = {}
for target in self.target_classes:
label_train_dic[target.label_id] = target.train_id
return label_train_dic
    def read_data_list_files(self, images_list, masks_list):
        """Read the text files that list image and mask paths.

        Args:
            images_list (list): paths to text files, one image path per line.
            masks_list (list): paths to text files, one mask path per line;
                entries may be None (inference), in which case the y entries
                become None and no existence filtering is applied.

        Returns:
            tuple(list, list): parallel lists of image and mask paths.
        """
        x_set_filt = []
        y_set_filt = []
        for imgs, lbls in zip(images_list, masks_list):
            print("Reading Imgs : {}, Reading Lbls : {}".format(imgs, lbls))
            # Strict validation only for training; val/test may be looser.
            if self.phase == "train":
                assert os.path.isfile(imgs) and imgs.endswith(".txt"), (
                    f"Image file doesn't exist at {imgs}"
                )
                assert os.path.isfile(lbls) and lbls.endswith(".txt"), (
                    f"Label file doesn't exist at {lbls}"
                )
            # Both are valid text files. So read from them.
            with open(imgs) as f:
                x_set = f.readlines()
            # Remove whitespace characters like `\n` at the end of each line
            if lbls:
                with open(lbls) as f:
                    y_set = f.readlines()
                for f_im, f_label in zip(x_set, y_set):
                    # Ensuring all image files are present
                    f_im = f_im.strip()
                    f_label = f_label.strip()
                    if self.filter_data:
                        # Keep the pair only when both files exist on disk.
                        # `expand_path` is an external helper -- presumably
                        # expands ~ and env vars; TODO confirm.
                        if os.path.isfile(expand_path(f_im)) and os.path.isfile(expand_path(f_label)):
                            x_set_filt.append(f_im)
                            y_set_filt.append(f_label)
                    else:
                        x_set_filt.append(f_im)
                        y_set_filt.append(f_label)
            else:
                # During inference we do not filter
                y_set_filt += [None for _ in x_set]
                x_set_filt += [x.strip() for x in x_set]
        return x_set_filt, y_set_filt
    def augment(self, x, y, x_orig=None):
        """Apply training-time augmentation to an image/label pair.

        Performs (probabilistic) horizontal/vertical flips, random crop-and-
        resize, and brightness jitter on the image; the label receives the
        same spatial transforms (nearest-neighbour for resize) but no
        brightness change. ``x_orig``, when given, gets the same spatial
        transforms without normalization clipping, for visualization.

        Args:
            x: normalized image tensor (HWC).
            y: label tensor (HWC).
            x_orig: optional un-normalized copy of the image.

        Returns:
            tuple: (x, y, x_orig) after augmentation.
        """
        if self._augment:
            # Default values
            # If the user provides augment alone and not the aug config.
            hflip_probability = 0.5
            vflip_probability = 0.5
            crop_and_resize_prob = 0.5
            crop_and_resize_ratio = 0.1
            delta = 0.2
            if self.augmentation_params:
                # If spatial augmentation params are provided
                if self.augmentation_params.spatial_augmentation:
                    hflip_probability = \
                        self.augmentation_params.spatial_augmentation.hflip_probability
                    vflip_probability = \
                        self.augmentation_params.spatial_augmentation.vflip_probability
                    crop_and_resize_prob = \
                        self.augmentation_params.spatial_augmentation.crop_and_resize_prob
                    crop_and_resize_ratio = \
                        self.augmentation_params.spatial_augmentation.crop_and_resize_ratio
                    # Reverting to default values if not present
                    if not hflip_probability:
                        hflip_probability = 0.5
                    if not vflip_probability:
                        vflip_probability = 0.5
                    if not crop_and_resize_prob:
                        crop_and_resize_prob = 0.5
                if self.augmentation_params.brightness_augmentation:
                    delta = self.augmentation_params.brightness_augmentation.delta
                    if not delta:
                        delta = 0.2
            # Horizontal flip: the same coin toss is reused for x, y (and
            # x_orig below) so all tensors stay spatially aligned.
            h_flip = tf.random_uniform([]) < hflip_probability
            x = tf.cond(h_flip, lambda: tf.image.flip_left_right(x), lambda: x)
            y = tf.cond(h_flip, lambda: tf.image.flip_left_right(y), lambda: y)
            # Vertical flip
            v_flip = tf.random_uniform([]) < vflip_probability
            x = tf.cond(v_flip, lambda: tf.image.flip_up_down(x), lambda: x)
            y = tf.cond(v_flip, lambda: tf.image.flip_up_down(y), lambda: y)
            # Prepare for batched transforms (crop_and_resize expects NHWC).
            x = tf.expand_dims(x, 0)
            y = tf.expand_dims(y, 0)
            # Random crop and resize: the crop box insets each edge by up to
            # crop_and_resize_ratio of the image size.
            crop_and_resize = tf.random_uniform([]) < crop_and_resize_prob
            left = tf.random_uniform([]) * crop_and_resize_ratio
            right = 1 - tf.random_uniform([]) * crop_and_resize_ratio
            top = tf.random_uniform([]) * crop_and_resize_ratio
            bottom = 1 - tf.random_uniform([]) * crop_and_resize_ratio
            x = tf.cond(
                crop_and_resize,
                lambda: tf.image.crop_and_resize(x, [[top, left, bottom, right]], [0],
                                                 (self.model_input_height,
                                                  self.model_input_width)), lambda: x)
            # Labels use nearest-neighbour so class ids stay integral.
            y = tf.cond(
                crop_and_resize,
                lambda: tf.image.crop_and_resize(y, [[top, left, bottom, right]], [0],
                                                 (self.model_input_height,
                                                  self.model_input_width),
                                                 method="nearest"), lambda: y)
            # Adjust brightness and keep values in the normalized range.
            x = tf.image.random_brightness(x, max_delta=delta)
            if self.preprocess == "min_max_-1_1":
                x = tf.clip_by_value(x, clip_value_min=-1, clip_value_max=1)
            elif self.preprocess == "min_max_0_1":
                x = tf.clip_by_value(x, clip_value_min=0, clip_value_max=1)
            x = tf.squeeze(x, 0)
            y = tf.squeeze(y, 0)
            if x_orig is not None:
                # Apply respective transformations except normalization to input image
                x_orig = tf.cond(h_flip, lambda: tf.image.flip_left_right(x_orig), lambda: x_orig)
                x_orig = tf.cond(v_flip, lambda: tf.image.flip_up_down(x_orig), lambda: x_orig)
                x_orig = tf.expand_dims(x_orig, 0)
                x_orig = tf.cond(
                    crop_and_resize,
                    lambda: tf.image.crop_and_resize(x_orig, [[top, left, bottom, right]], [0],
                                                     (self.model_input_height,
                                                      self.model_input_width)), lambda: x_orig)
                x_orig = tf.squeeze(x_orig, 0)
        return x, y, x_orig
def resize_vanilla(self, x, y, x_orig=None):
"""Function to resize the output to mode output size for Vanilla Unet."""
if y is not None:
if self.model_arch == "vanilla_unet":
y = tf.image.resize_image_with_crop_or_pad(
y, target_width=self.model_output_width,
target_height=self.model_output_height)
return x, y, x_orig
return x
    @property
    def train_size(self):
        """int: number of samples in this dataset instance (training phase)."""
        return len(self.image_names_list)
    @property
    def eval_size(self):
        """int: number of samples in this dataset instance (eval phase)."""
        return len(self.image_names_list)
    @property
    def test_size(self):
        """int: number of samples in this dataset instance (test phase)."""
        return len(self.image_names_list)
def get_output_dimensions(self):
"""Function to return model input heights and width."""
if self.model_arch == "vanilla_unet":
return 388, 388
return self.model_input_height, self.model_input_width
    def get_test_image_names(self):
        """Return the list of image paths for this dataset instance."""
        return self.image_names_list
    def get_test_mask_names(self):
        """Return the recorded test mask names (not the image names)."""
        return self.test_mask_names
@staticmethod
def get_decode_fn(ext):
"""Function to assign the decode function."""
if ext.lower() in ["jpg", "jpeg"]:
decode_fn = tf.io.decode_jpeg
else:
# EXT should be png
decode_fn = tf.io.decode_png
return decode_fn
    def read_image_and_label_tensors(self, img_path, label=None):
        """Read and decode an image (and optionally its label) into tensors.

        Returns (x, y, x_orig) when a label is given, else just x; x_orig
        keeps the un-normalized decoded image for visualization.
        """
        # NOTE(review): appending inside a tf.data map function records
        # graph-construction-time paths, not per-element runtime paths.
        self.test_image_names.append(img_path)
        x_str = tf.io.read_file(img_path)
        x = self.decode_fn_img(contents=x_str, channels=self.model_input_channels)
        x_orig = x
        if self.input_image_type == "grayscale":
            # Grayscale needs to be normalized before resizing
            x = tf.cast(x, dtype="float32")
            x = tf.divide(x, 127.5) - 1
        if label is not None:
            y_str = tf.io.read_file(label)
            y = self.decode_fn_label(contents=y_str, channels=1)
            if self.input_image_type == "grayscale":
                # Grayscale masks are assumed binary 0/255 -- TODO confirm.
                y = tf.divide(y, 255)
                y = tf.cast(y, dtype="float32")
            return x, y, x_orig
        return x
    def apply_label_mapping_tf(self, x, y=None, x_orig=None):
        """Map raw label ids to train ids via a (cached) TF hash table.

        Grayscale inputs skip the mapping entirely. Unmapped ids fall back
        to the table's default value of 0.
        """
        if self.input_image_type == "grayscale":
            return x, y, x_orig
        if y is not None:
            y = tf.cast(y, dtype=tf.int64)
            # Build the lookup table once and reuse it for every element.
            if self.lookup_table is None:
                keys = list(self.label_train_dic.keys())
                values = list(self.label_train_dic.values())
                keys = tf.cast(tf.constant(keys), dtype=tf.int64)
                values = tf.cast(tf.constant(values), dtype=tf.int64)
                self.lookup_table = tf.contrib.lookup.HashTable(
                    tf.contrib.lookup.KeyValueTensorInitializer(keys, values), 0)
            y = self.lookup_table.lookup(y)
        return x, y, x_orig
def rgb_to_bgr_tf(self, x, y=None, x_orig=None):
"""Map Function to convert image to channel first."""
if self.input_image_type != "grayscale":
x = tf.reverse(x, axis=[-1])
if y is not None:
return x, y, x_orig
return x
    def cast_img_lbl_dtype_tf(self, img, label=None, x_orig=None):
        """Cast image (and, when present, label) tensors to float32."""
        img_cast = tf.cast(img, dtype="float32")
        if label is not None:
            label_cast = tf.cast(label, dtype="float32")
            return img_cast, label_cast, x_orig
        return img_cast
    def resize_image_helper(self, img):
        """Resize one image tensor to the model input size.

        Vanilla UNet first resizes to the (smaller) output size then center-
        pads back up to the input size; other backbones either letterbox
        (resize with pad) or plain-resize using the configured method.
        """
        resize_methods = {'BILINEAR': tf.image.ResizeMethod.BILINEAR,
                          'NEAREST_NEIGHBOR': tf.image.ResizeMethod.NEAREST_NEIGHBOR,
                          'BICUBIC': tf.image.ResizeMethod.BICUBIC,
                          'AREA': tf.image.ResizeMethod.AREA}
        if self.model_arch == "vanilla_unet":
            img = tf.image.resize_images(img, (self.model_output_height,
                                               self.model_output_width))
            x = tf.image.resize_image_with_crop_or_pad(img, self.model_input_height,
                                                       self.model_input_width)
        else:
            if self.resize_padding:
                # Aspect-preserving resize with padding (letterbox).
                x = tf.image.resize_image_with_pad(img,
                                                   target_height=self.model_input_height,
                                                   target_width=self.model_input_width,
                                                   method=resize_methods[self.resize_method])
            else:
                x = tf.image.resize_images(img, (self.model_input_height,
                                                 self.model_input_width),
                                           method=resize_methods[self.resize_method])
        return x
    def resize_image_and_label_tf(self, img, label=None, x_orig=None):
        """Resize image (and optionally label/original) to model dimensions.

        Labels always use nearest-neighbour interpolation so integer class
        ids are not blended.
        """
        x = self.resize_image_helper(img)
        if x_orig is not None:
            x_orig = self.resize_image_helper(x_orig)
        if label is not None:
            if self.model_arch == "vanilla_unet":
                y = tf.image.resize_images(label, (self.model_output_height,
                                                   self.model_output_width))
                y = tf.image.resize_image_with_crop_or_pad(y, self.model_input_height,
                                                           self.model_input_width)
            else:
                # Labels should be always nearest neighbour, as they are integers.
                if self.resize_padding:
                    y = tf.image.resize_image_with_pad(
                        label, target_height=self.model_output_height,
                        target_width=self.model_output_width,
                        method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
                else:
                    y = tf.image.resize_images(
                        label, (self.model_output_height, self.model_output_width),
                        method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
            return x, y, x_orig
        return x
    def normalize_img_tf(self, img_tensor, y=None, x_orig=None):
        """Normalize the image tensor per the configured preprocess mode.

        Grayscale inputs are skipped (already normalized at decode time).
        Note: "div_by_255" and "min_max_0_1" are intentionally identical
        here (both divide by 255); "min_max_-1_1" maps into [-1, 1].
        """
        if self.input_image_type != "grayscale":
            if self.preprocess == "div_by_255":
                # A way to normalize an image tensor by dividing them by 255.
                # This assumes images with max pixel value of
                # 255. It gives normalized image with pixel values in range of >=0 to <=1.
                img_tensor /= 255.0
            elif self.preprocess == "min_max_0_1":
                img_tensor /= 255.0
            elif self.preprocess == "min_max_-1_1":
                img_tensor = tf.divide(img_tensor, 127.5) - 1
        if y is not None:
            return img_tensor, y, x_orig
        return img_tensor
    def categorize_image_tf(self, img_tensor, number_of_classes, img_width,
                            img_height, data_format):
        """
        Label Pre-processor.

        Converts a 1 channel image tensor containing class values from 0 to N-1,
        where N is the total number of classes, into a one-hot representation.

        :param img_tensor: Input label tensor (single channel of class ids).
        :param number_of_classes: Total number of classes.
        :param img_width: The width of the input image.
        :param img_height: The height of the input image.
        :param data_format: Either channels_last or channels_first way of storing data.
        :return: Categorized image tensor with as many channels as there were number of classes.
                 Each channel would have a 1 if the class was present otherwise a 0.
                 For num_classes == 1 (binary with sigmoid) no one-hot is applied.
        """
        # Here we assume the image_tensor to have an tf.uint8 dtype. No asserts have been put.
        # Flatten the image out first.
        if self.input_image_type == "grayscale":
            # Threshold at 0.5 to binarize the (already 0..1) grayscale mask.
            cond = tf.less(img_tensor, 0.5 * tf.ones(tf.shape(img_tensor)))
            img_tensor = tf.where(cond, tf.zeros(tf.shape(img_tensor)),
                                  tf.ones(tf.shape(img_tensor)))
            labels = tf.cast(img_tensor, tf.int32)
            if self.num_classes != 1:
                # We need not do one-hot vectorization if num classes > 1
                labels = tf.one_hot(labels, self.num_classes,
                                    axis=0)
            labels = tf.cast(labels, tf.float32)
            # NOTE(review): the channels_first reshape uses [C, W, H] here
            # but [C, H, W] in the color path below -- verify for
            # non-square grayscale inputs.
            labels = tf.reshape(labels,
                                [img_height, img_width, number_of_classes] if
                                data_format == "channels_last" else
                                [number_of_classes, img_width, img_height])
            labels = tf.cast(labels, dtype="float32")
            return labels
        img_flatten = tf.reshape(img_tensor, [-1])
        img_flatten_uint8 = tf.cast(img_flatten, tf.uint8)
        # Give it to one hot.
        img_cat = img_flatten_uint8
        if self.num_classes != 1:
            img_cat = tf.one_hot(img_flatten_uint8, depth=number_of_classes, axis=-1
                                 if data_format == "channels_last" else 0,
                                 dtype=img_flatten_uint8.dtype)
        im_cat_dtype_cast = tf.cast(img_cat, img_tensor.dtype)
        # Un-flatten it back.
        img_cat_unflatten = tf.reshape(im_cat_dtype_cast,
                                       [img_height, img_width, number_of_classes]
                                       if data_format == "channels_last" else
                                       [number_of_classes, img_height, img_width])
        img_cat_unflatten = tf.cast(img_cat_unflatten, dtype="float32")
        return img_cat_unflatten
def dictionarize_labels_eval(self, x, y=None, x_orig=None):
"""Map Function to return labels for evaluation."""
x_dic = {"x_orig": x_orig, "features": x, "labels": y}
return x_dic, y
    def transpose_to_nchw(self, x, y=None, x_orig=None):
        """Transpose image (and label, when present) from HWC to CHW."""
        x = tf.transpose(x, perm=[2, 0, 1])  # Brings channel dimension to first. from HWC to CHW.
        if y is not None:
            y = tf.transpose(y, perm=[2, 0, 1])
            return x, y, x_orig
        return x
    def prednn_categorize_label(self, img, label_img, x_orig=None):
        """One-hot encode the (CHW) label for the network; pass-through without label."""
        if label_img is not None:
            return img, self.categorize_image_tf(label_img, number_of_classes=self.num_classes,
                                                 img_width=self.model_output_width,
                                                 img_height=self.model_output_height,
                                                 data_format="channels_first"), x_orig
        return img
    def input_fn(self, drop_remainder=False):
        """Return the tf.data input pipeline for training/evaluation.

        NOTE(review): `drop_remainder` is currently ignored by the
        underlying pipeline.
        """
        return self.input_fn_aigs_tf()
    def input_fn_aigs_tf(self):
        """Build the tf.data pipeline: decode, map labels, resize, normalize,
        augment (train only), transpose to NCHW, one-hot, batch, prefetch.

        The map order matters: label mapping and resizing must precede
        augmentation, and one-hot encoding happens after the NCHW transpose.
        """
        dataset = tf.data.Dataset.from_tensor_slices((self.image_names_list, self.masks_names_list))
        if self.phase == "train":
            # Shuffle file names (before sharding) with a fixed seed for
            # reproducibility across workers.
            dataset = dataset.shuffle(buffer_size=self.buffer_size,
                                      seed=self._seed,
                                      reshuffle_each_iteration=True)
        # Each GPU worker processes a disjoint shard.
        dataset = dataset.shard(self._num_gpus, self._gpu_id)
        dataset = dataset.map(self.read_image_and_label_tensors,
                              num_parallel_calls=tf.data.experimental.AUTOTUNE)
        dataset = dataset.map(lambda x, y=None,
                              x_orig=None: self.apply_label_mapping_tf(x, y, x_orig),
                              num_parallel_calls=tf.data.experimental.AUTOTUNE)
        dataset = dataset.map(self.rgb_to_bgr_tf,
                              num_parallel_calls=tf.data.experimental.AUTOTUNE)
        dataset = dataset.map(self.cast_img_lbl_dtype_tf,
                              num_parallel_calls=tf.data.experimental.AUTOTUNE)
        dataset = dataset.map(self.resize_image_and_label_tf,
                              num_parallel_calls=tf.data.experimental.AUTOTUNE)
        dataset = dataset.map(lambda x, y=None, x_orig=None: self.normalize_img_tf(x, y, x_orig),
                              num_parallel_calls=tf.data.experimental.AUTOTUNE)
        if self.phase == "train":
            dataset = dataset.map(lambda x, y=None, x_orig=None: self.augment(x, y, x_orig),
                                  num_parallel_calls=tf.data.experimental.AUTOTUNE)
        dataset = dataset.map(lambda x, y=None, x_orig=None: self.resize_vanilla(x, y, x_orig),
                              num_parallel_calls=tf.data.experimental.AUTOTUNE)
        dataset = dataset.map(self.transpose_to_nchw,
                              num_parallel_calls=tf.data.experimental.AUTOTUNE)
        dataset = dataset.map(lambda x, y=None,
                              x_orig=None: self.prednn_categorize_label(x, y, x_orig),
                              num_parallel_calls=tf.data.experimental.AUTOTUNE)
        # Skip elements that fail to decode/resize instead of aborting.
        dataset = dataset.apply(tf.data.experimental.ignore_errors())
        if self.phase == "train":
            dataset = dataset.repeat()
        dataset = dataset.map(lambda x, y=None,
                              x_orig=None: self.dictionarize_labels_eval(x, y, x_orig),
                              num_parallel_calls=tf.data.experimental.AUTOTUNE)
        dataset = dataset.batch(self._batch_size)
        dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
        if self.data_options:
            dataset = dataset.with_options(self._data_options)
        return dataset
    def eval_fn(self, count=1):
        """Input function for evaluation (NOTE(review): `count` is unused)."""
        return self.input_fn()
    def test_fn(self, count=1):
        """Input function for testing/inference: images only, no labels.

        Mirrors the training pipeline minus label mapping, augmentation and
        one-hot encoding. NOTE(review): `count` is unused.
        """
        dataset = tf.data.Dataset.from_tensor_slices((self.image_names_list))
        dataset = dataset.shard(self._num_gpus, self._gpu_id)
        dataset = dataset.map(self.read_image_and_label_tensors,
                              num_parallel_calls=tf.data.experimental.AUTOTUNE)
        dataset = dataset.map(self.rgb_to_bgr_tf,
                              num_parallel_calls=tf.data.experimental.AUTOTUNE)
        dataset = dataset.map(self.cast_img_lbl_dtype_tf,
                              num_parallel_calls=tf.data.experimental.AUTOTUNE)
        dataset = dataset.map(self.resize_image_and_label_tf,
                              num_parallel_calls=tf.data.experimental.AUTOTUNE)
        dataset = dataset.map(lambda x, y=None, x_orig=None: self.normalize_img_tf(x),
                              num_parallel_calls=tf.data.experimental.AUTOTUNE)
        dataset = dataset.map(lambda x, y=None, x_orig=None: self.resize_vanilla(x, y),
                              num_parallel_calls=tf.data.experimental.AUTOTUNE)
        dataset = dataset.map(self.transpose_to_nchw,
                              num_parallel_calls=tf.data.experimental.AUTOTUNE)
        # Skip undecodable files instead of aborting inference.
        dataset = dataset.apply(tf.data.experimental.ignore_errors())
        dataset = dataset.batch(self._batch_size)
        dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
        return dataset
    @property
    def _data_options(self):
        """Constructs tf.data.Options enabling parallel batching, slack and
        map parallelization/vectorization optimizations."""
        data_options = tf.data.Options()
        data_options.experimental_optimization.parallel_batch = True
        # Slack lets prefetch hand over elements slightly early.
        data_options.experimental_slack = True
        data_options.experimental_optimization.map_parallelization = True
        map_vectorization_options = tf.data.experimental.MapVectorizationOptions()
        map_vectorization_options.enabled = True
        map_vectorization_options.use_choose_fastest = True
        data_options.experimental_optimization.map_vectorization = map_vectorization_options
        return data_options
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/utils/data_loader.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""IVA Unet utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/utils/__init__.py |
# Copyright 2019-2020 NVIDIA Corporation. All rights reserved.
"""UNet TensorRT inference."""
import os
import numpy as np
import tensorflow as tf
from tqdm import tqdm
class Inferencer(object):
    """Manages TensorRT objects for model inference."""

    def __init__(self, keras_model=None, batch_size=None, trt_engine_path=None,
                 dataset=None, activation="softmax", num_conf_mat_classes=None):
        """Initializes Keras / TensorRT objects needed for model inference.

        Args:
            keras_model (keras model or None): Keras model object for inference
                (currently unused: only the TRT path is implemented).
            batch_size (int or None): requested batch size; overridden by the
                engine's max batch size.
            trt_engine_path (str or None): TensorRT engine path.
            dataset (class object): Dataset class object providing the
                preprocessing map functions.
            activation (string): activation used in the model ("softmax" or
                "sigmoid").
            num_conf_mat_classes (int or None): number of classes for the
                confusion matrix.

        Raises:
            ValueError: if no TRT engine path is given.
        """
        if trt_engine_path is not None:
            # use TensorRT for inference
            # Import TRTInferencer only if it's a TRT Engine.
            # Note: import TRTInferencer after fork() or in MPI might fail.
            from nvidia_tao_tf1.cv.common.inferencer.trt_inferencer import TRTInferencer
            # Build once to discover the engine's max batch size, then
            # rebuild with that batch size so buffers are sized to it.
            self.trt_inf = TRTInferencer(trt_engine_path, batch_size=batch_size)
            self.batch_size = self.trt_inf.max_batch_size
            self.trt_inf = TRTInferencer(trt_engine_path, batch_size=self.batch_size)
            self.model_input_height = self.trt_inf._input_shape[1]
            self.model_input_width = self.trt_inf._input_shape[2]
            self.pred_fn = self.trt_inf.infer_batch
        else:
            raise ValueError("Need trt_engine_path.")
        self.supported_img_format = ['.jpg', '.jpeg', '.JPG', '.JPEG', '.png', '.PNG']
        self.dataset = dataset
        self.activation = activation
        self.num_conf_mat_classes = num_conf_mat_classes
        self.session = self.set_session()

    def set_session(self):
        '''Helper function to set TF operations to CPU.'''
        # Configuring tensorflow to use CPU so that is doesn't interfere
        # with tensorrt.
        device_count = {'GPU': 0, 'CPU': 1}
        session_config = tf.compat.v1.ConfigProto(
            device_count=device_count
        )
        session = tf.compat.v1.Session(
            config=session_config,
            graph=tf.get_default_graph()
        )
        return session

    def _load_img(self, img_filename):
        """load an image and returns the original image and a numpy array for model to consume.

        Runs the same preprocessing chain as the training data loader
        (BGR conversion, cast, resize, normalize, NCHW transpose) and
        materializes the result via the CPU session.

        Args:
            img_filename (str): path to an image
        Returns:
            inputs: Pre-processed numpy array
        """
        inputs = self.dataset.read_image_and_label_tensors(img_filename)
        inputs = self.dataset.rgb_to_bgr_tf(inputs)
        inputs = self.dataset.cast_img_lbl_dtype_tf(inputs)
        inputs = self.dataset.resize_image_and_label_tf(inputs)
        inputs = self.dataset.normalize_img_tf(inputs)
        inputs = self.dataset.transpose_to_nchw(inputs)
        inputs = inputs.eval(session=self.session)
        inputs = np.array(inputs)
        return inputs

    def _predict_batch(self, inf_inputs):
        '''function to predict a batch and post-process the raw output.'''
        inf_inputs_np = np.array(inf_inputs)
        y_pred = self.pred_fn(inf_inputs_np)
        predictions_batch = self.infer_process_fn(y_pred)
        return predictions_batch

    def infer_process_fn(self, y_pred):
        '''Post process the TRT inference output by reshaping.

        Returns a list of dicts ({"logits": HxW uint8 class map}), one per
        image in the batch. Sigmoid outputs are thresholded at 0.5.
        '''
        predictions_batch = []
        for idx in range(y_pred[0].shape[0]):
            pred = np.reshape(y_pred[0][idx, ...], (self.dataset.model_output_height,
                                                    self.dataset.model_output_width,
                                                    1))
            pred = np.squeeze(pred, axis=-1)
            if self.activation == "sigmoid":
                # Binary segmentation: threshold probabilities at 0.5.
                pred = np.where(pred > 0.5, 1, 0)
            pred = pred.astype(np.uint8)
            pred_dic = {"logits": pred}
            predictions_batch.append(pred_dic)
        return predictions_batch

    def _inference_folder(self, img_names_list):
        """inference on a list of image paths, in engine-sized batches.

        Args:
            img_names_list: list of image file paths.
        Returns:
            tuple(list, list): per-image prediction dicts and the
            corresponding image paths, in processed order.
        """
        predictions = []
        full_img_paths = []
        # Ceiling division: the last batch may be smaller than batch_size.
        n_batches = (len(img_names_list) + self.batch_size - 1) // self.batch_size
        for batch_idx in tqdm(range(n_batches)):
            inf_inputs = []
            for img_path in img_names_list[
                batch_idx*self.batch_size:(batch_idx+1)*self.batch_size
            ]:
                _, ext = os.path.splitext(img_path)
                if ext not in self.supported_img_format:
                    raise ValueError("Provided image format {} is not supported!".format(ext))
                inf_input = self._load_img(img_path)
                inf_inputs.append(inf_input)
                full_img_paths.append(img_path)
            y_pred_batch = self._predict_batch(inf_inputs)
            predictions += y_pred_batch
        return predictions, full_img_paths

    def infer(self, image_names_list):
        """Run inference over all images and close the TF session."""
        if not image_names_list:
            raise ValueError("Image input folder for inference should not be empty!")
        predictions, full_img_paths = self._inference_folder(image_names_list)
        self.session.close()
        return predictions, full_img_paths
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/utils/inference_trt.py |
"""Utilities for distributed execution."""
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
__all__ = ["MPI_local_rank", "MPI_rank", "MPI_size", "MPI_rank_and_size", "MPI_is_distributed"]
def MPI_is_distributed():
    """Return True when an OpenMPI distributed runtime is detected via env vars."""
    required = ("OMPI_COMM_WORLD_RANK", "OMPI_COMM_WORLD_SIZE")
    return all(var in os.environ for var in required)
def MPI_local_rank():
    """Return the node-local MPI rank, or 0 outside a distributed run."""
    return int(os.environ.get("OMPI_COMM_WORLD_LOCAL_RANK", 0))
def MPI_rank():
    """Return the global MPI rank (0 outside a distributed run)."""
    rank, _ = MPI_rank_and_size()
    return rank
def MPI_size():
    """Return the MPI world size (1 outside a distributed run)."""
    _, size = MPI_rank_and_size()
    return size
def MPI_rank_and_size():
    """Return (rank, size) as a tuple.

    The MPI environment variables are consulted only once TensorFlow has
    been imported; otherwise single-process defaults (0, 1) are returned.
    """
    if "tensorflow" not in sys.modules:
        return 0, 1
    return mpi_env_MPI_rank_and_size()
# Source: https://github.com/horovod/horovod/blob/c3626e/test/common.py#L25
def mpi_env_MPI_rank_and_size():
    """Read the MPI rank and size from implementation-specific env variables.

    MPI launchers (mpirun/mpiexec) export the rank and size of the
    MPI_COMM_WORLD communicator under implementation-specific names. The
    PMI pair (MPICH/Intel MPI) is checked first, then OpenMPI's. When no
    complete pair is present, (0, 1) is returned -- matching MPI_Init's
    behaviour for a program started without mpirun, which creates an
    independent single-process communicator.

    Source: https://github.com/horovod/horovod/blob/c3626e/test/common.py#L25

    Returns:
        tuple(int, int): (rank, size).
    """
    candidates = (("PMI_RANK", "PMI_SIZE"),
                  ("OMPI_COMM_WORLD_RANK", "OMPI_COMM_WORLD_SIZE"))
    for rank_var, size_var in candidates:
        rank = os.environ.get(rank_var)
        size = os.environ.get(size_var)
        if rank is not None and size is not None:
            return int(rank), int(size)
    # Not launched under mpirun: single-process defaults.
    return 0, 1
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/utils/distributed_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model function defining the model, loss and outputs.
This function provides the model function that instantiates the unet model
along with loss, model hyper-parameters and predictions.
"""
import logging
import os
import random
import numpy as np
import tensorflow as tf
from nvidia_tao_tf1.cv.unet.distribution import distribution
from nvidia_tao_tf1.cv.unet.proto.regularizer_config_pb2 import RegularizerConfig
# Collection keys used to group TF summaries by logging frequency.
INFREQUENT_SUMMARY_KEY = "infrequent_summary"
FREQUENT_SUMMARY_KEY = "frequent_summary"

# Module-level logger for this model function module.
logger = logging.getLogger(__name__)
# Class Dice coefficient averaged over batch
def dice_coef(predict, target, axis=1, eps=0):
    """Compute the Dice coefficient, averaged over the batch.

    Args:
        predict: predicted tensor.
        target: ground-truth tensor.
        axis: axis (or axes) reduced to get the per-sample overlap.
        eps: smoothing constant guarding against division by zero.

    Returns:
        Scalar tensor: mean Dice coefficient across the batch.
    """
    overlap = tf.reduce_sum(predict * target, axis=axis)
    denom = tf.reduce_sum(predict * predict + target * target, axis=axis)
    per_sample_dice = (2. * overlap + eps) / (denom + eps)
    # Average over the batch dimension.
    return tf.reduce_mean(per_sample_dice, axis=0)
def regularization_l2loss(weight_decay):
    """Compute the L2 regularization loss over trainable variables.

    Batch-norm parameters are excluded, as regularizing them is known to
    hurt; returns a zero scalar when nothing is left to regularize.
    """
    def loss_filter_fn(name):
        """we don't need to compute L2 loss for BN."""
        return all([
            tensor_name not in name.lower()
            for tensor_name in ["batchnorm", "batch_norm", "batch_normalization"]
        ])

    # Cast to float32 so the loss is computed in full precision under AMP.
    filtered_params = [tf.cast(v, tf.float32) for v in tf.trainable_variables()
                       if loss_filter_fn(v.name)]

    if len(filtered_params) != 0:
        l2_loss_per_vars = [tf.nn.l2_loss(v) for v in filtered_params]
        l2_loss = tf.multiply(tf.add_n(l2_loss_per_vars), weight_decay)
    else:
        l2_loss = tf.zeros(shape=(), dtype=tf.float32)

    return l2_loss
def is_using_hvd():
    """Return True when the OpenMPI env variables indicate a Horovod run."""
    env_vars = ("OMPI_COMM_WORLD_RANK", "OMPI_COMM_WORLD_SIZE")
    return all(var in os.environ for var in env_vars)
def get_learning_rate(lr_init, params, global_step):
    """Return the learning rate tensor (or constant) per the configured scheduler.

    Supports cosine decay and (staircased) exponential decay; falls back to
    the constant initial rate when no scheduler is configured.

    Raises:
        NotImplementedError: for any other scheduler type in the config.
    """
    if params.lr_scheduler:
        if params.lr_scheduler.WhichOneof("lr_scheduler") == 'cosine_decay':
            return tf.compat.v1.train.cosine_decay(
                lr_init, global_step, params.lr_scheduler.cosine_decay.decay_steps,
                alpha=params.lr_scheduler.cosine_decay.alpha, name=None)
        if params.lr_scheduler.WhichOneof("lr_scheduler") == 'exponential_decay':
            return tf.compat.v1.train.exponential_decay(
                lr_init, global_step, params.lr_scheduler.exponential_decay.decay_steps,
                decay_rate=params.lr_scheduler.exponential_decay.decay_rate,
                staircase=True, name=None)
        raise NotImplementedError('The provided learning rate scheduler is not supported.')
    # Return constant learning rate
    return lr_init
def tensorboard_visualize(tensor, tensor_name, visualize):
    """Register a scalar summary for `tensor` when visualization is enabled.

    The summary goes into the frequent-summary collection; `tf.identity`
    gives the tensor a stable, queryable name in the graph.
    """
    frequent_collections = [FREQUENT_SUMMARY_KEY]
    if visualize:
        tf.identity(tensor, name=tensor_name)
        tf.summary.scalar(tensor_name, tensor, collections=frequent_collections)
def get_logits(output_map_activation, params):
    """Convert activated network output into an integer class map.

    For sigmoid activation the output is thresholded at 0.5 (values below
    become 0.0, others 1.0); for softmax the per-pixel argmax over the
    channel axis is returned.
    """
    if params["activation"] != "sigmoid":
        # Softmax head: pick the highest-scoring class along the channel axis.
        return tf.compat.v1.argmax(output_map_activation, axis=1)
    shape = tf.shape(output_map_activation)
    below_threshold = tf.less(output_map_activation, 0.5 * tf.ones(shape))
    return tf.where(below_threshold, tf.zeros(shape), tf.ones(shape))
def get_color_id(num_classes):
    """Return one RGB tuple per class, with fixed colors for the first two.

    Colors are generated deterministically by seeding the ``random`` module
    with the class index; the two hard-coded colors are then moved to the
    front of the list (duplicates removed via set difference).
    """
    seeded_colors = []
    for class_idx in range(num_classes):
        random.seed(class_idx)
        seeded_colors.append(tuple(random.randint(0, 255) for _ in range(3)))
    fixed_colors = [(100, 100, 100), (255, 0, 0)]
    return fixed_colors + list(set(seeded_colors) - set(fixed_colors))
def convert_class_id_to_color(tensor, palette):
    """Map an integer class-ID tensor to an RGB uint8 image tensor.

    Args:
        tensor: Class-ID map; only dims 1 and 2 are read as H and W
            (assumes an NHW(-like) layout -- TODO confirm with callers).
        palette: Array-like of per-class RGB triplets, indexed by class ID.

    Returns:
        A (batch, H, W, 3) uint8 tensor of per-pixel colors.
    """
    dynamic_shape = tf.shape(tensor)
    height, width = dynamic_shape[1], dynamic_shape[2]
    lookup_table = tf.constant(palette, dtype=tf.uint8)
    flattened_ids = tf.reshape(tensor, [dynamic_shape[0], -1])
    colored = tf.gather(lookup_table, flattened_ids)
    return tf.reshape(colored, [-1, height, width, 3])
def visualize_image_color(tensor, x_orig, num_classes, labels_gt):
    """Emit TensorBoard image summaries of predictions vs. ground truth.

    Writes the raw input image, the colorized prediction, and 50/50 overlay
    blends of input with prediction and with ground truth. All summaries go
    to the infrequent collection so they are saved at a lower cadence.

    Args:
        tensor: Predicted class-ID map.
        x_orig: Original (unnormalized) input image batch.
        num_classes: Number of classes, used to build the color palette.
        labels_gt: Ground-truth class-ID map.
    """
    collections = [INFREQUENT_SUMMARY_KEY]
    tf.summary.image("input_image", x_orig, collections=collections)
    palette = np.array(get_color_id(num_classes), np.uint8)
    pred_colored = convert_class_id_to_color(tensor, palette)
    gt_colored = convert_class_id_to_color(labels_gt, palette)
    tf.summary.image("predicted_image", pred_colored, collections=collections)
    # Blend input and mask at half intensity each for the overlay views.
    half_input = x_orig / 2
    pred_overlay = half_input + pred_colored / 2
    gt_overlay = gt_colored / 2 + half_input
    tf.summary.image("predicted_overlay", pred_overlay, collections=collections)
    tf.summary.image("groundtruth_overlay", gt_overlay, collections=collections)
def unet_fn(features, labels, mode, params):
    """Model function for tf.Estimator.

    Controls how the training is performed by specifying how the
    total_loss is computed and applied in the backward pass.

    Args:
        features (tf.Tensor or dict): Input samples; when a dict, carries
            'features', 'labels' and 'x_orig' (the unnormalized image used
            for visualization).
        labels (tf.Tensor): Ground-truth label maps.
        mode (tf.estimator.ModeKeys): Indicates if we train, evaluate or predict.
        params: Dict-like namespace of run settings; accessed both as
            attributes (params.visualize) and as a mapping
            (params["activation"]).

    Returns:
        Appropriate tf.estimator.EstimatorSpec for the current mode.

    Notes:
        NOTE(review): `dice_loss` is only defined when params.loss is "dice"
        or "cross_dice_sum", yet EVAL mode references it unconditionally --
        evaluation with loss="cross_entropy" would raise NameError; confirm
        eval is only run with dice-based losses.
    """
    dtype = tf.float32
    logger.info(params)
    device = '/gpu:0'
    global_step = tf.compat.v1.train.get_global_step()
    # The learning-rate schedule is only needed on the training path.
    if mode == tf.estimator.ModeKeys.TRAIN:
        lr = get_learning_rate(params.learning_rate, params, global_step)
    # The dataloader packs inputs as a dict: the network input, the labels
    # used at predict/eval time, and the original image for visualization.
    if isinstance(features, dict):
        features_new = features['features'], features['labels'], features['x_orig']
        features, labels_pred, x_orig = features_new
    with tf.device(device):
        # Model function is instantitated on One GPU.
        features = tf.cast(features, dtype)
        unet_model = params.unet_model
        # Constructing the unet model
        img_height, img_width, img_channels = \
            params.experiment_spec.model_config.model_input_height, \
            params.experiment_spec.model_config.model_input_width, \
            params.experiment_spec.model_config.model_input_channels
        unet_model.construct_model(input_shape=(img_channels, img_height, img_width),
                                   pretrained_weights_file=params.pretrained_weights_file,
                                   enc_key=params.key, model_json=params.model_json,
                                   custom_objs=params.custom_objs)
        unet_model.keras_model.summary()
        output_map = unet_model.keras_model(features)
        # Apply the configured activation to obtain per-pixel scores.
        if params["activation"] == "sigmoid":
            output_map_activation = tf.math.sigmoid(output_map)
        else:
            output_map_activation = tf.nn.softmax(output_map, axis=1)
        if params.visualize and params.phase == "train":
            # For GT vis
            labels_gt = labels
            if params["activation"] == "softmax":
                labels_gt = tf.compat.v1.argmax(labels_gt, axis=1)
            labels_gt = tf.cast(labels_gt, tf.int64)
            logits_img = get_logits(output_map_activation, params)
            logits_img = tf.expand_dims(logits_img, axis=-1)
            logits_img = tf.cast(logits_img, tf.int64)
            visualize_image_color(logits_img, x_orig, params.num_classes, labels_gt)
        # PREDICT serves two phases: "test" returns raw class maps, any
        # other phase computes a per-image confusion matrix for evaluation.
        if mode == tf.estimator.ModeKeys.PREDICT:
            logits = get_logits(output_map_activation, params)
            if params.phase == "test":
                predictions = {"logits": logits}
                return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
            # For evaluation
            if params["activation"] == "softmax":
                labels_pred = tf.compat.v1.argmax(labels_pred, axis=1)
            flat_logits = tf.reshape(tf.cast(logits, tf.float32),
                                     [tf.shape(logits)[0], -1])
            flat_labels = tf.reshape(labels_pred,
                                     [tf.shape(labels_pred)[0], -1])
            elems = (flat_labels, flat_logits)
            # map_fn computes one confusion matrix per image in the batch.
            conf_matrix = tf.map_fn(lambda x: tf.math.confusion_matrix(x[0], x[1],
                                    num_classes=params.num_conf_mat_classes,
                                    dtype=tf.float32), elems, dtype=tf.float32)
            predictions = {'conf_matrix': conf_matrix}
            return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
        # Transpose the output map
        # Regularization: batch-norm params, biases and BN betas are
        # excluded from the weight-decay penalty.
        trainable_variables = tf.compat.v1.trainable_variables()
        if params.experiment_spec.training_config.regularizer.type == RegularizerConfig.L2:
            regularization_loss = params.weight_decay * tf.add_n(
                [tf.nn.l2_loss(v) for v in trainable_variables if not
                 any([pattern in v.name for pattern in
                     ["batch_normalization", "bias", "beta"]])])
        elif params.experiment_spec.training_config.regularizer.type == RegularizerConfig.L1:
            l1_regularizer = tf.contrib.layers.l1_regularizer(scale=params.weight_decay, scope=None)
            regularization_loss = tf.contrib.layers.apply_regularization(
                l1_regularizer, [v for v in trainable_variables if not
                                 any([pattern in v.name for pattern in
                                     ["batch_normalization", "bias", "beta"]])])
        else:
            # Setting reg to 0 when no regularization is provided
            l1_regularizer = tf.contrib.layers.l1_regularizer(scale=0.0, scope=None)
            regularization_loss = tf.contrib.layers.apply_regularization(
                l1_regularizer, trainable_variables)
        # Debug the tensors for NaN
        if params.visualize and params.weights_monitor:
            # Visualize the weights and gradients
            histogram_collections = [INFREQUENT_SUMMARY_KEY]
            for tr_v in trainable_variables:
                tf.debugging.check_numerics(tr_v, message='Output map had NaN/ \
                                            Infinity values.')
                tf.compat.v1.verify_tensor_all_finite(tr_v, msg="Nan")
                tf.summary.histogram(tr_v.name, tr_v, collections=histogram_collections)
        # Cross-entropy is always built (named 'cross_loss_ref' so hooks can
        # fetch it by name); the dice terms are added per the loss config.
        if params.activation == "sigmoid":
            crossentropy_loss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(logits=output_map,
                                                        labels=labels),
                name='cross_loss_ref')
        else:
            crossentropy_loss = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits_v2(logits=output_map,
                                                           labels=labels,
                                                           axis=1),
                name='cross_loss_ref')
        if params.loss in ["cross_dice_sum", "dice"]:
            dice_loss = tf.reduce_mean(1 - dice_coef(output_map_activation, labels),
                                       name='dice_loss')
            tensorboard_visualize(dice_loss, 'dice_loss', params.visualize)
        if params.loss == "cross_dice_sum":
            total_loss = tf.add(crossentropy_loss, dice_loss)
            tensorboard_visualize(total_loss, 'cross_dice_loss', params.visualize)
            total_loss = tf.add(total_loss, regularization_loss, name="total_loss_ref")
        elif params.loss == "cross_entropy":
            tensorboard_visualize(crossentropy_loss, 'crossentropy_loss', params.visualize)
            total_loss = tf.add(crossentropy_loss, regularization_loss, name="total_loss_ref")
        elif params.loss == "dice":
            total_loss = tf.add(dice_loss, regularization_loss, name="total_loss_ref")
        tensorboard_visualize(total_loss, 'total_loss', params.visualize)
        tensorboard_visualize(regularization_loss, 'regularization_loss', params.visualize)
        hooks = []
        if params.visualize:
            # Two summary cadences: frequent scalars and infrequent images /
            # histograms, each merged from its own collection.
            events_dir = os.path.join(params.model_dir, "events")
            save_steps_frequent = params.save_summary_steps
            save_steps_infrequent = params.infrequent_save_summary_steps
            infrequent_summary_hook = tf.train.SummarySaverHook(
                save_steps=save_steps_infrequent,
                output_dir=events_dir,
                scaffold=tf.train.Scaffold(
                    summary_op=tf.summary.merge_all(key=INFREQUENT_SUMMARY_KEY)))
            frequent_summary_hook = tf.train.SummarySaverHook(
                save_steps=save_steps_frequent, output_dir=events_dir,
                scaffold=tf.train.Scaffold(
                    summary_op=tf.summary.merge_all(key=FREQUENT_SUMMARY_KEY)))
            hooks += [infrequent_summary_hook, frequent_summary_hook]
        if mode == tf.estimator.ModeKeys.EVAL:
            # NOTE(review): `loss=dice_loss` (not total_loss) is reported as
            # the EVAL loss here -- confirm this is intentional.
            eval_metric_ops = {"eval_ce_loss": tf.compat.v1.metrics.mean(crossentropy_loss),
                               "eval_dice_loss": tf.compat.v1.metrics.mean(dice_loss),
                               "eval_total_loss": tf.compat.v1.metrics.mean(total_loss),
                               "eval_dice_score": tf.compat.v1.metrics.mean(1.0 - dice_loss)
                               }
            return tf.estimator.EstimatorSpec(mode=mode, loss=dice_loss,
                                              eval_metric_ops=eval_metric_ops)
        # Training path: Adam, optionally wrapped by the distributor for
        # multi-GPU training, with batch-norm update ops as dependencies.
        opt = tf.compat.v1.train.AdamOptimizer(learning_rate=lr)
        if distribution.get_distributor().is_distributed():
            opt = distribution.get_distributor().distribute_optimizer(opt)
        with tf.control_dependencies(
                tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)):
            # GATE_OP gating keeps gradient computation deterministic.
            deterministic = True
            gate_gradients = (
                tf.compat.v1.train.Optimizer.GATE_OP
                if deterministic
                else tf.compat.v1.train.Optimizer.GATE_NONE)
            train_op = opt.minimize(total_loss, gate_gradients=gate_gradients,
                                    global_step=global_step)
        return tf.estimator.EstimatorSpec(mode, loss=total_loss, train_op=train_op,
                                          training_hooks=hooks,
                                          eval_metric_ops={})
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/utils/model_fn.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test UNet dataloader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pytest
import tensorflow as tf
from nvidia_tao_tf1.cv.unet.model.build_unet_model import build_model, select_model_proto
from nvidia_tao_tf1.cv.unet.model.utilities import build_target_class_list, get_num_unique_train_ids
from nvidia_tao_tf1.cv.unet.model.utilities import get_train_class_mapping
from nvidia_tao_tf1.cv.unet.model.utilities import initialize, initialize_params
from nvidia_tao_tf1.cv.unet.model.utilities import update_model_params
from nvidia_tao_tf1.cv.unet.spec_handler.spec_loader import load_experiment_spec
from nvidia_tao_tf1.cv.unet.utils.data_loader import Dataset
# Eager mode lets the test pull concrete numpy batches from the pipeline.
tf.compat.v1.enable_eager_execution()
@pytest.mark.skipif(os.getenv("RUN_ON_CI", "0") == "1", reason="Cannot be run on CI")
@pytest.mark.parametrize("data_config",
                         [
                             '../../unet/experiment_specs/test_isbi.txt',
                             '../../unet/experiment_specs/test_peoplesemseg.txt'
                         ])
def test_unet_dataloader(data_config):
    """Check that the UNet dataset yields NCHW image/mask batches of the configured size."""
    batch_size = 1
    module_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    spec_path = os.path.join(module_dir, data_config)
    experiment_spec = load_experiment_spec(spec_path, merge_from_default=False)
    # Set up the global environment and derive the run parameters.
    initialize(experiment_spec)
    params = initialize_params(experiment_spec)
    target_classes = build_target_class_list(
        experiment_spec.dataset_config.data_class_config)
    num_classes = get_num_unique_train_ids(target_classes)
    print(get_train_class_mapping(target_classes))
    model_config = select_model_proto(experiment_spec)
    unet_model = build_model(m_config=model_config,
                             target_class_names=target_classes)
    params = update_model_params(params=params,
                                 unet_model=unet_model,
                                 experiment_spec=experiment_spec,
                                 target_classes=target_classes)
    dataset = Dataset(batch_size=batch_size,
                      fold=params.crossvalidation_idx,
                      augment=params.augment,
                      params=params,
                      phase="train",
                      target_classes=target_classes)
    # Pull a single batch eagerly from the input pipeline.
    iterator = dataset.input_fn().make_one_shot_iterator()
    features_dict, labels = iterator.get_next()
    image_batch = features_dict["features"].numpy()
    mask_batch = labels.numpy()
    expected_hw = (model_config.model_input_height,
                   model_config.model_input_width)
    assert image_batch.shape == (1, model_config.model_input_channels) + expected_hw
    assert mask_batch.shape == (1, num_classes) + expected_hw
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/utils/tests/test_dataloader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA checkpoint hook for tlt files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from tensorflow.python.platform import tf_logging as logging
from nvidia_tao_tf1.core.decorators import override, subclass
INFREQUENT_SUMMARY_KEY = b'infrequent_summary'
@subclass
class IVACheckpointSaverHook(tf.estimator.CheckpointSaverHook):
    """Checkpoint saver that writes TLT-style checkpoint folders every N steps or seconds."""

    def __init__(self,
                 checkpoint_dir,
                 save_secs=None,
                 save_steps=None,
                 model_json=None,
                 saver=None,
                 checkpoint_basename="model.ckpt",
                 steps_per_epoch=None,
                 scaffold=None,
                 listeners=None,
                 load_graph=False):
        """Initialize an IVACheckpointSaverHook.

        Args:
            checkpoint_dir (str): Base directory for the checkpoint files.
            save_secs (int): Save every N secs.
            save_steps (int): Save every N steps.
            model_json (str): Optional path to the serialized keras graph to
                copy next to each checkpoint.
            saver (Saver): Object used for saving.
            checkpoint_basename (str): Base name for the checkpoint files.
            steps_per_epoch (int): Steps per epoch, used to name checkpoints
                by epoch index.
            scaffold (Scaffold): Use to get saver object.
            listeners (list of CheckpointSaverListener): Subclass instances.
                Used for callbacks that run immediately before or after this
                hook saves the checkpoint.
            load_graph (bool): When True (and model_json is set), the model
                JSON is written alongside every checkpoint.

        Raises:
            ValueError: One of `save_steps` or `save_secs` should be set.
            ValueError: At most one of `saver` or `scaffold` should be set.
        """
        super(IVACheckpointSaverHook, self).__init__(checkpoint_dir,
                                                     save_secs=save_secs,
                                                     save_steps=save_steps,
                                                     saver=saver,
                                                     checkpoint_basename=checkpoint_basename,
                                                     scaffold=scaffold,
                                                     listeners=listeners)
        self.model_json = model_json
        self.load_graph = load_graph
        self.steps_per_epoch = steps_per_epoch

    @override
    def _save(self, session, step):
        """Save the latest checkpoint and return whether training should stop."""
        logging.info("Saving checkpoints for step-%d.", step)
        for listener in self._listeners:
            listener.before_save(session, step)
        self._save_checkpoint(session, step)
        should_stop = False
        for listener in self._listeners:
            if listener.after_save(session, step):
                logging.info(
                    "A CheckpointSaverListener requested that training be stopped. "
                    "listener: {}".format(listener))
                should_stop = True
        return should_stop

    def _save_checkpoint(self, session, step):
        """Write the session checkpoint (and optional model JSON) into a .tlt folder."""
        saver = self._get_saver()
        epoch = int(step / self.steps_per_epoch)
        ckzip_folder = os.path.join(self._checkpoint_dir,
                                    'model.epoch-{}.tlt'.format(epoch))
        if not os.path.isdir(ckzip_folder):
            os.makedirs(ckzip_folder)
        saver.save(session, os.path.join(ckzip_folder, "model.ckpt"), global_step=epoch)
        if self.model_json and self.load_graph:
            # Copy the serialized keras graph next to the weights so the
            # checkpoint folder is self-contained.
            with open(self.model_json, 'r') as json_file:
                serialized_graph = json_file.read()
            with open(os.path.join(ckzip_folder, "model.json"), 'w') as json_file:
                json_file.write(serialized_graph)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/hooks/checkpoint_saver_hook.py |
"""Pretrained weight loading hooks."""
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
import sys
import tensorflow as tf
__all__ = ["PretrainedWeightsLoadingHook"]
logger = logging.getLogger(__name__)
tf.logging.set_verbosity(tf.logging.WARN)
# pylint: disable=protected-access
# Currently variable_scope doesn't provide very good APIs to access
# all variables under scope and retrieve and check existing scopes.
def get_variable_full_name(var):
    """Return the checkpoint-level full name of a variable.

    For plain Variables this is simply ``var.op.name``; for sliced or
    partitioned variables every slice shares the full name recorded in the
    save-slice info, which is also the name used in checkpoint files.

    Args:
        var: A `Variable` object.

    Returns:
        A string that is the full name.
    """
    slice_info = var._save_slice_info
    if slice_info:
        return slice_info.full_name
    return var.op.name
def assign_from_checkpoint(model_path, var_list, ignore_missing_vars=False, remove_head=False):
    """Creates an operation to assign specific variables from a checkpoint.

    Args:
      model_path: The full path to the model checkpoint. To get latest checkpoint
          use `model_path = tf.train.latest_checkpoint(checkpoint_dir)`
      var_list: A list of (possibly partitioned) `Variable` objects or a
          dictionary mapping names in the checkpoint to the corresponding variables
          or list of variables to initialize from that checkpoint value. For
          partitioned Variables, the name in the checkpoint must be the full
          variable, not the name of the partitioned variable, eg. "my_var" rather
          than "my_var/part_4". If empty, returns no_op(), {}.
      ignore_missing_vars: Boolean, if True ignore variables missing in the
          checkpoint with a warning instead of failing.
      remove_head: Boolean; when True, the last variable of each checkpoint
          group is skipped so the classification head is left untouched
          (transfer learning). When var_list is a dict, the last 4 entries
          are additionally dropped (see comment below).

    Returns:
      the restore_op and the feed_dict that need to be run to restore var_list.

    Raises:
      ValueError: If `ignore_missing_vars` is False and the checkpoint specified
          at `model_path` is missing one of the variables in `var_list`.
    """
    # Normalize var_list into a dictionary mapping names in the
    # checkpoint to the list of variables to initialize from that
    # checkpoint variable. Sliced (including partitioned) variables will
    # end up under the same key.
    grouped_vars = {}
    if isinstance(var_list, (tuple, list)):
        for var in var_list:
            ckpt_name = get_variable_full_name(var)
            if ckpt_name not in grouped_vars:
                grouped_vars[ckpt_name] = []
            grouped_vars[ckpt_name].append(var)
    else:
        var_list_items = list(var_list.items())
        # The last 4 graph nodes will correspond to the layer producing the
        # final output with channels equal to the num classes. Hence removing them
        # to do transfer learning.
        # NOTE(review): this relies on dict insertion order matching graph
        # order and on the head contributing exactly 4 entries -- confirm.
        var_list_items = var_list_items[:-4]
        for var_list_item in var_list_items:
            ckpt_name, value = var_list_item
            if isinstance(value, (tuple, list)):
                grouped_vars[ckpt_name] = value
            else:
                grouped_vars[ckpt_name] = [value]
    # Read each checkpoint entry. Create a placeholder variable and
    # add the (possibly sliced) data from the checkpoint to the feed_dict.
    reader = tf.compat.v1.train.NewCheckpointReader(model_path)
    feed_dict = {}
    assign_ops = []
    for ckpt_name in grouped_vars:
        if not reader.has_tensor(ckpt_name):
            log_str = 'Checkpoint is missing variable [%s]' % ckpt_name
            if ignore_missing_vars:
                logger.debug(log_str)
                continue
            # @vpraveen: Removing the else coming from
            # pylint errors.
            raise ValueError(log_str)
        ckpt_value = reader.get_tensor(ckpt_name)
        # Optionally drop the last variable of this group so the head layer
        # keeps its fresh initialization.
        if remove_head:
            restore_variables = grouped_vars[ckpt_name][:-1]
        else:
            restore_variables = grouped_vars[ckpt_name]
        for var in restore_variables:
            # Values are fed through placeholders rather than embedded as
            # constants, keeping the graph small for large checkpoints.
            placeholder_tensor = tf.compat.v1.placeholder(
                dtype=var.dtype.base_dtype,
                shape=var.get_shape(),
                name='placeholder/' + var.op.name
            )
            assign_ops.append(var.assign(placeholder_tensor))
            if not var._save_slice_info:
                # Whole-variable restore: shapes must match exactly.
                if var.get_shape() != ckpt_value.shape:
                    raise ValueError(
                        'Total size of new array must be unchanged for %s '
                        'lh_shape: [%s], rh_shape: [%s]' %
                        (ckpt_name, str(ckpt_value.shape), str(var.get_shape())))
                feed_dict[placeholder_tensor] = ckpt_value.reshape(ckpt_value.shape)
            else:
                # Partitioned variable: carve this slice out of the full
                # checkpoint tensor using the recorded offset/shape.
                slice_dims = zip(var._save_slice_info.var_offset,
                                 var._save_slice_info.var_shape)
                slice_dims = [(start, start + size) for (start, size) in slice_dims]
                slice_dims = [slice(*x) for x in slice_dims]
                slice_value = ckpt_value[slice_dims]
                slice_value = slice_value.reshape(var._save_slice_info.var_shape)
                feed_dict[placeholder_tensor] = slice_value
    # Log the restore source as part of the op graph so it prints when run.
    print_op = tf.print(
        "[GPU] Restoring pretrained weights from: %s" % (
            model_path
        ),
        output_stream=sys.stdout
    )
    with tf.control_dependencies([print_op]):
        assign_op = tf.group(*assign_ops)
    return assign_op, feed_dict
def build_assigment_map(prefix=None, skip_variables_regex=None):
    """Generate an assignment map for loading checkpoints.

    Maps checkpoint variable names (with ``prefix`` stripped) to the global
    variables found under that scope, skipping optimizer slot variables and
    any name matching ``skip_variables_regex``.

    Args:
        prefix (str): Variable-scope prefix to restrict the lookup to; also
            stripped from the keys of the returned map.
        skip_variables_regex (str): Pattern of (prefix-stripped) variable
            names to exclude from the map.

    Returns:
        dict: Mapping of checkpoint name -> tf.Variable.
    """
    all_vars = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, scope=prefix)
    if not prefix:
        prefix = ''
    # Optimizer slot variables are never restored from pretrained weights.
    # The original code tested `var_name[-k:] in "<suffix>"`, i.e. substring
    # containment, which can spuriously match partial names; an explicit
    # suffix test expresses the intent exactly.
    optimizer_suffixes = (
        "/Momentum:0",
        "/Adadelta:0",
        "/Adadelta_1:0",
        "/Adam:0",
        "/Adam_1:0",
        "/Adagrad:0",
        "/RMSProp:0",
        "/RMSProp_1:0",
        "/LARSOptimizer:0",
    )
    assignment_map = {}
    for var in all_vars:
        var_name = var.name
        if var_name.endswith(optimizer_suffixes):
            continue
        # Trim the output index (":0") off the variable name.
        if ':' in var_name:
            var_name = var_name[:var_name.rindex(':')]
        if skip_variables_regex and re.match(skip_variables_regex, var_name[len(prefix):]):
            continue
        assignment_map[var_name[len(prefix):]] = var
    return assignment_map
class PretrainedWeightsLoadingHook(tf.estimator.SessionRunHook):
    """Session hook that restores pretrained weights once at session creation."""

    def __init__(self, prefix, checkpoint_path, skip_variables_regex=None, remove_head=False):
        """Set up the hook.

        Args:
            prefix (str): Variable-scope prefix to restore under.
            checkpoint_path (str): Path to the pretrained checkpoint.
            skip_variables_regex (str): Variables matching this pattern are
                not restored.
            remove_head (bool): When True, the head variables are left
                uninitialized (transfer learning).
        """
        self._prefix = prefix
        self._checkpoint_path = checkpoint_path
        self._skip_variables_regex = skip_variables_regex
        self._is_initialized = False
        self._init_op = None
        self._init_feed_dict = None
        self.remove_head = remove_head

    def begin(self):
        """Build the restore op and feed dict before the graph is finalized."""
        self._init_op, self._init_feed_dict = assign_from_checkpoint(
            model_path=self._checkpoint_path,
            var_list=build_assigment_map(
                prefix=self._prefix,
                skip_variables_regex=self._skip_variables_regex
            ),
            ignore_missing_vars=True,
            remove_head=self.remove_head
        )

    def after_create_session(self, session, coord=None):
        """Run the restore op exactly once after the session is created."""
        if self._is_initialized:
            return
        session.run(self._init_op, feed_dict=self._init_feed_dict)
        logger.info("Pretrained weights loaded with success...\n")
        self._is_initialized = True
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/hooks/pretrained_restore_hook.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Useful hooks to the tensorflow session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/hooks/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hook for resuming from the recent most checkpoint."""
import logging
import os
import tempfile
from zipfile import BadZipFile, ZipFile
from nvidia_tao_tf1.encoding import encoding
logger = logging.getLogger(__name__)
class LatestCheckpoint(object):
    """Locate and (if needed) decrypt the most recent training checkpoint."""

    def __init__(self, key, model_dir):
        """Initialize LatestCheckpoint.

        Args:
            key (str): The key to encrypt the trained model.
            model_dir (str): The path to retrieve the latest checkpoint saved.
        """
        self._temp_dir = tempfile.mkdtemp()
        tmp_path, model_json, encrypted = self.get_latest_checkpoint(model_dir, key)
        self.ckpt = tmp_path
        self.model_json = model_json
        if tmp_path and encrypted:
            # The extracted `checkpoint` index file records the directory
            # TensorFlow originally saved to; adopt it as the working dir.
            with open(os.path.join(self._temp_dir, "checkpoint"), "r") as f:
                first_line = f.readline()
                recorded_path = first_line.split(":")[-1]
                self._temp_dir = os.path.dirname(recorded_path)

    def get_latest_checkpoint(self, results_dir, key):
        """Get the latest checkpoint path from a given results directory.

        Parses through the directory to look for the latest checkpoint file
        and returns the path to this file.

        Args:
            results_dir (str): Path to the results directory.
            key (str): Key used to decrypt an encrypted checkpoint archive.

        Returns:
            Tuple of (checkpoint path, model json path, encrypted flag), all
            None when the directory is absent or holds no `.tlt` checkpoints.
        """
        # The results dir may not exist yet (e.g. first run on NGC).
        if not os.path.exists(results_dir):
            return None, None, None
        # Checkpoints are named `model.epoch-<N>.tlt`; collect the epochs.
        epochs = [int(name.split('.')[1].split('-')[1])
                  for name in os.listdir(results_dir) if name.endswith(".tlt")]
        if not epochs:
            return None, None, None
        latest_step = max(epochs)
        latest_checkpoint = os.path.join(results_dir,
                                         "model.epoch-{}.tlt".format(latest_step))
        logger.info("Getting the latest checkpoint for restoring {}".format(latest_checkpoint))
        return self.get_tf_ckpt(latest_checkpoint, key, latest_step)

    def get_tf_ckpt(self, ckzip_path, enc_key, latest_step):
        """Simple function to extract and get a trainable checkpoint.

        Args:
            ckzip_path (str): Path to the encrypted checkpoint.
            enc_key (str): Key used to decode the archive.
            latest_step (int): Epoch index encoded in the checkpoint name.

        Returns:
            tf_ckpt_path (str): Path to the decrypted tf checkpoint
        """
        if os.path.isdir(ckzip_path):
            # Already an extracted (unencrypted) checkpoint folder.
            encrypted = False
            extract_dir = ckzip_path
        else:
            encrypted = True
            extract_dir = self._temp_dir
            os_handle, temp_zip_path = tempfile.mkstemp()
            temp_zip_path = temp_zip_path + ".zip"
            os.close(os_handle)
            # Decrypt the checkpoint archive into a temporary zip file.
            with open(ckzip_path, 'rb') as encoded_file, open(temp_zip_path, 'wb') as tmp_zip_file:
                encoding.decode(encoded_file, tmp_zip_file, bytes(enc_key, 'utf-8'))
            # Extract every member into the temporary checkpoint directory.
            try:
                with ZipFile(temp_zip_path, 'r') as zip_object:
                    for member in zip_object.namelist():
                        zip_object.extract(member, path=extract_dir)
            except BadZipFile:
                raise ValueError(
                    "The zipfile extracted was corrupt. Please check your key or "
                    "re-launch the training."
                )
            except Exception:
                raise IOError(
                    "The last checkpoint file is not saved properly. "
                    "Please delete it and rerun the script."
                )
            # Removing the temporary zip path.
            os.remove(temp_zip_path)
        json_candidates = [os.path.join(extract_dir, f) for f in
                           os.listdir(extract_dir) if f.endswith(".json")]
        model_json = json_candidates[0] if json_candidates else None
        return os.path.join(extract_dir,
                            "model.ckpt-{}".format(latest_step)), model_json, encrypted
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/hooks/latest_checkpoint.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hook to save the loss logs."""
from datetime import timedelta
import gc
import json
import os
import time
import tensorflow as tf
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.unet.distribution import distribution
MONITOR_JSON_FILENAME = "monitor.json"
def write_monitor_json(
    save_path, loss_value, running_avg_loss, current_epoch, max_epoch, time_per_epoch, ETA
):
    """Write the monitor.json file for cluster monitoring purposes.

    Args:
        save_path (str): Path where monitor.json needs to be saved. Basically
            the result directory.
        loss_value (float): Current mini-batch loss to record.
        running_avg_loss (float): Running average of the loss.
        current_epoch (int): Current epoch.
        max_epoch (int): Total number of epochs.
        time_per_epoch (float): Time per epoch in seconds.
        ETA (float): Estimated remaining time in seconds.
    """
    monitor_data = {
        "epoch": current_epoch,
        "max_epoch": max_epoch,
        "time_per_epoch": str(timedelta(seconds=time_per_epoch)),
        "ETA": str(timedelta(seconds=ETA)),
        "mini_batch_loss": loss_value,
        "running_average_loss": running_avg_loss,
    }
    # Report to the status logger first; never let logging kill the job.
    s_logger = status_logging.get_status_logger()
    try:
        s_logger.graphical = {
            "loss": running_avg_loss,
        }
        s_logger.write(
            data=monitor_data,
            status_level=status_logging.Status.RUNNING)
    except IOError:
        # We let this pass because we do not want the json file writing to crash the whole job.
        pass
    # Persist the same payload as monitor.json in the results directory.
    try:
        with open(os.path.join(save_path, MONITOR_JSON_FILENAME), "w") as f:
            json.dump(monitor_data, f)
    except IOError:
        # Best-effort write: a failed dump must not abort training.
        pass
class TrainingHook(tf.estimator.SessionRunHook):
    """Hook that tracks total loss and epoch timing, logging progress periodically."""

    def __init__(self, logger, steps_per_epoch, max_epochs, save_path, params, log_every=1):
        """Initialize TrainingHook.

        Args:
            logger: Logger used for per-step progress messages.
            steps_per_epoch (int): Number of optimizer steps per epoch.
            max_epochs (int): Total number of epochs to train.
            save_path (str): Directory where monitor.json is written.
            params: Experiment parameters (the loss name is read for logs).
            log_every (int): Log every N steps.
        """
        self._log_every = log_every
        self._iter_idx = 0
        self.logger = logger
        self.steps_per_epoch = steps_per_epoch
        self.max_epochs = max_epochs
        # Epoch timing bookkeeping.
        self.time_per_epoch = 0
        self._step_start_time = None
        # Closest estimate of the start time, in case starting from mid-epoch.
        self._epoch_start_time = time.time()
        self.save_path = save_path
        self.params = params
        self.run_average_loss = 0
        self.run_average_loss_sum = 0

    def before_run(self, run_context):
        """Request the total loss and global step for the upcoming run call."""
        run_args = tf.estimator.SessionRunArgs(
            fetches={
                "total_loss" : 'total_loss_ref:0',
                "step": tf.train.get_or_create_global_step()
            }
        )
        self._step_start_time = time.time()
        return run_args

    def after_run(self,
                  run_context,
                  run_values):
        """Update epoch timing and running-average loss; log and persist progress."""
        cur_step = run_values.results["step"]
        cur_step_per_epoch = (cur_step + 1) % self.steps_per_epoch
        if cur_step_per_epoch == 0:
            # Last step of an epoch just completed: record its duration and
            # roll the epoch timer forward to this step's start time.
            epoch_end_time = time.time()
            self.time_per_epoch = epoch_end_time - self._epoch_start_time
            self._epoch_start_time = self._step_start_time
        total_loss = run_values.results["total_loss"]
        self.run_average_loss_sum += total_loss
        if cur_step_per_epoch:
            self.run_average_loss = self.run_average_loss_sum / float(cur_step_per_epoch)
        else:
            # Epoch boundary: average over the full epoch, then reset the sum.
            self.run_average_loss = self.run_average_loss_sum / float(self.steps_per_epoch)
            self.run_average_loss_sum = 0
        # Only rank 0 writes monitor.json and emits progress logs.
        if (cur_step % self.steps_per_epoch == 0) and (distribution.get_distributor().rank() == 0):
            current_epoch = int(cur_step / self.steps_per_epoch)
            write_monitor_json(
                save_path=self.save_path,
                loss_value=float(total_loss),
                running_avg_loss=float(self.run_average_loss),
                current_epoch=current_epoch,
                max_epoch=self.max_epochs,
                time_per_epoch=self.time_per_epoch,
                ETA=(self.max_epochs - current_epoch) * self.time_per_epoch,
            )
        if (cur_step % self._log_every == 0) and (distribution.get_distributor().rank() == 0):
            current_epoch = float(cur_step / self.steps_per_epoch)
            self.logger.info(
                "Epoch: %f/%d:, Cur-Step: %d, loss(%s): %0.5f, Running average loss:"
                "%0.5f, Time taken: %s ETA: %s"
                % (
                    current_epoch,
                    self.max_epochs,
                    cur_step,
                    self.params.loss,
                    float(total_loss),
                    float(self.run_average_loss),
                    self.time_per_epoch,
                    (self.max_epochs - current_epoch) * self.time_per_epoch,
                )
            )
        gc.collect()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/hooks/training_hook.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hook to log the thoroughput and latency."""
import time
import numpy as np
import tensorflow as tf
from nvidia_tao_tf1.cv.unet.distribution import distribution
from nvidia_tao_tf1.cv.unet.utils.parse_results import process_performance_stats
class ProfilingHook(tf.estimator.SessionRunHook):
    """Saves thoroughput and latency every N steps."""

    def __init__(self, logger, batch_size, log_every, warmup_steps, mode):
        """Initialize ProfilingHook.

        Args:
            logger: Logger object used to report the final statistics.
            batch_size (int): Per-GPU batch size; multiplied by world size to
                obtain the global batch size used for throughput.
            log_every (int): Logging interval in steps.
            warmup_steps (int): Number of initial steps excluded from timing.
            mode (str): Phase tag ('train' or eval) embedded in metric names.
        """
        self._log_every = log_every
        self._warmup_steps = warmup_steps
        self._current_step = 0
        self._global_batch_size = batch_size * distribution.get_distributor().size()
        self._t0 = 0
        self._timestamps = []
        self.logger = logger
        self.mode = mode

    def before_run(self, run_context):
        """Start the step timer once the warm-up period has passed."""
        if self._current_step > self._warmup_steps:
            self._t0 = time.time()

    def after_run(self,
                  run_context,
                  run_values):
        """Record the elapsed step time (post warm-up) and advance the counter."""
        if self._current_step > self._warmup_steps:
            self._timestamps.append(time.time() - self._t0)
        self._current_step += 1

    def begin(self):
        """Begin of tensorflow session."""
        pass

    def end(self, session):
        """Compute and log throughput/latency at session end (rank 0 only)."""
        if distribution.get_distributor().rank() != 0:
            return
        stats = process_performance_stats(np.array(self._timestamps),
                                          self._global_batch_size)
        throughput_imgps, latency_ms = stats
        self.logger.log(step=(),
                        data={'throughput_{}'.format(self.mode): throughput_imgps,
                              'latency_{}'.format(self.mode): latency_ms})
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/hooks/profiling_hook.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""IVA Unet entrypoint scripts."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to export a trained UNet model to an ETLT file for deployment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from datetime import datetime as dt
import logging
import os
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.unet.export.unet_exporter import UNetExporter as Exporter
logger = logging.getLogger(__name__)
DEFAULT_MAX_WORKSPACE_SIZE = 1 * (1 << 30)
DEFAULT_MAX_BATCH_SIZE = 1
DEFAULT_MIN_BATCH_SIZE = 1
DEFAULT_OPT_BATCH_SIZE = 1
def build_command_line_parser(parser=None):
    """Build the command line parser for the export entry point.

    Args:
        parser (argparse.ArgumentParser): Existing parser to populate; a new
            one is created when None.

    Returns:
        argparse.ArgumentParser: Parser with all export options registered.
    """
    if parser is None:
        parser = argparse.ArgumentParser(description='Export a trained TLT model')

    # (option strings, add_argument keyword arguments), in display order.
    # Entries with help=argparse.SUPPRESS are accepted but hidden from -h.
    arg_table = [
        (("-m", "--model"),
         dict(help="Path to the model file.", type=str, required=True, default=None)),
        (("-k", "--key"),
         dict(help="Key to load the model.", type=str, default="", required=False)),
        (("-e", "--experiment_spec"),
         dict(type=str, default=None, required=True,
              help="Path to the experiment spec file.")),
        (("-o", "--output_file"),
         dict(type=str, default=None,
              help="Output file (defaults to $(input_filename).onnx)")),
        (("--data_type",),
         dict(type=str, default="fp32",
              help="Data type for the TensorRT export.",
              choices=["fp32", "fp16", "int8"])),
        # TensorRT engine-builder knobs (hidden).
        (("--max_workspace_size",),
         dict(type=int, default=DEFAULT_MAX_WORKSPACE_SIZE, help=argparse.SUPPRESS)),
        (("--max_batch_size",),
         dict(type=int, default=DEFAULT_MAX_BATCH_SIZE, help=argparse.SUPPRESS)),
        (("--min_batch_size",),
         dict(type=int, default=DEFAULT_MIN_BATCH_SIZE, help=argparse.SUPPRESS)),
        (("--opt_batch_size",),
         dict(type=int, default=DEFAULT_OPT_BATCH_SIZE, help=argparse.SUPPRESS)),
        (("--gen_ds_config",),
         dict(action="store_true", default=False,
              help="Generate a template DeepStream related configuration elements. "
                   "This config file is NOT a complete configuration file and requires "
                   "the user to update the sample config files in DeepStream with the "
                   "parameters generated from here.")),
        (("--engine_file",),
         dict(type=str, default=None, help=argparse.SUPPRESS)),
        (("-v", "--verbose"),
         dict(action="store_true", default=False,
              help="Verbosity of the logger.")),
        (("-s", "--strict_type_constraints"),
         dict(action="store_true", default=False, help=argparse.SUPPRESS)),
        # Int8 calibration arguments (mostly hidden).
        (("--batch_size",),
         dict(type=int, default=16, help=argparse.SUPPRESS)),
        (("--cal_data_file",),
         dict(default="", type=str, help=argparse.SUPPRESS)),
        (("--cal_image_dir",),
         dict(default="", type=str, help=argparse.SUPPRESS)),
        (("--cal_json_file",),
         dict(default="", type=str,
              help="Dictionary containing tensor scale for QAT models.")),
        (("--cal_cache_file",),
         dict(default='./cal.bin', type=str, help=argparse.SUPPRESS)),
        (("--batches",),
         dict(type=int, default=10, help=argparse.SUPPRESS)),
        (("--results_dir",),
         dict(type=str, default=None,
              help="Path to the files where the logs are stored.")),
        (("--force_ptq",),
         dict(action="store_true", default=False, help=argparse.SUPPRESS)),
    ]
    for flags, kwargs in arg_table:
        parser.add_argument(*flags, **kwargs)
    return parser
def parse_command_line(args=None):
    """Parse command line arguments into a plain dictionary.

    Args:
        args (list): Argument strings to parse; sys.argv is used when None.

    Returns:
        dict: Mapping of option names to parsed values.
    """
    namespace = build_command_line_parser().parse_args(args)
    return vars(namespace)
def build_exporter(model_path, key,
                   experiment_spec="",
                   data_type="fp32",
                   strict_type=False):
    """Instantiate a UNet exporter.

    Args:
        model_path (str): Path to the trained .tlt model.
        key (str): Encryption key used to load the model.
        experiment_spec (str): Path to the experiment spec file.
        data_type (str): TensorRT precision ('fp32', 'fp16' or 'int8').
        strict_type (bool): Whether to apply TensorRT strict type constraints.

    Returns:
        Exporter: Configured UNet exporter instance.
    """
    return Exporter(model_path=model_path,
                    key=key,
                    experiment_spec_path=experiment_spec,
                    data_type=data_type,
                    strict_type=strict_type)
def main(cl_args=None):
    """CLI wrapper to run export.

    Parses the command line interface for export, instantiates the respective
    exporter and serializes the trained model to an etlt file, optionally
    running int8 optimization.

    Args:
        cl_args (list): Arguments to parse.

    Returns:
        No explicit returns.
    """
    parsed_args = parse_command_line(args=cl_args)
    run_export(parsed_args)
def run_export(args):
    """Wrapper to run export of tlt models.

    Configures logging and status reporting, resolves the output path,
    builds a UNet exporter and serializes the trained model to an ONNX
    file (optionally also building a TensorRT engine).

    Args:
        args (dict): Dictionary of parsed arguments to run export.

    Raises:
        AssertionError: If the resolved output file already exists.

    Returns:
        No explicit returns.
    """
    results_dir = args["results_dir"]
    # Parsing command line arguments.
    model_path = args['model']
    key = args['key']
    data_type = args['data_type']
    output_file = args['output_file']
    experiment_spec = args['experiment_spec']
    engine_file_name = args['engine_file']
    max_workspace_size = args["max_workspace_size"]
    max_batch_size = args["max_batch_size"]
    strict_type = args['strict_type_constraints']
    cal_data_file = args["cal_data_file"]
    cal_image_dir = args["cal_image_dir"]
    cal_cache_file = args["cal_cache_file"]
    n_batches = args["batches"]
    batch_size = args["batch_size"]
    gen_ds_config = args["gen_ds_config"]
    min_batch_size = args["min_batch_size"]
    opt_batch_size = args["opt_batch_size"]
    force_ptq = args["force_ptq"]
    cal_json_file = args["cal_json_file"]
    # A TensorRT engine is only serialized when an engine path was supplied.
    save_engine = engine_file_name is not None
    log_level = "DEBUG" if args['verbose'] else "INFO"
    # Configure the logger.
    logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
                        level=log_level)
    # Set default output filename if the filename
    # isn't provided over the command line.
    if output_file is None:
        # Strip only a trailing ".tlt" extension. The previous
        # model_path.replace(".tlt", "") also removed ".tlt" occurring
        # anywhere else in the path (e.g. a directory name).
        if model_path.endswith(".tlt"):
            split_name = model_path[:-len(".tlt")]
        else:
            split_name = model_path
        output_file = "{}.onnx".format(split_name)
    # Warn the user if an exported file already exists.
    assert not os.path.exists(output_file), "Default output file {} already "\
        "exists".format(output_file)
    # Make an output directory if necessary.
    output_root = os.path.dirname(os.path.realpath(output_file))
    if not os.path.exists(output_root):
        os.makedirs(output_root)
    if not results_dir:
        results_dir = output_root
    if not os.path.exists(results_dir):
        os.makedirs(results_dir)
    timestamp = int(dt.timestamp(dt.now()))
    filename = "status.json"
    # In the shared container log directory, timestamp the status file so
    # concurrent export jobs do not overwrite each other.
    if results_dir == "/workspace/logs":
        filename = f"status_export_{timestamp}.json"
    status_file = os.path.join(results_dir, filename)
    status_logging.set_status_logger(
        status_logging.StatusLogger(
            filename=status_file,
            is_master=True
        )
    )
    status_logger = status_logging.get_status_logger()
    # Build exporter instance
    status_logger.write(message="Building exporter object.")
    exporter = build_exporter(model_path, key,
                              experiment_spec=experiment_spec,
                              data_type=data_type,
                              strict_type=strict_type)
    # Export the model to etlt file and build the TRT engine.
    status_logger.write(message="Exporting the model.")
    exporter.export(output_file_name=output_file,
                    backend="onnx",
                    save_engine=save_engine,
                    engine_file_name=engine_file_name,
                    max_batch_size=max_batch_size,
                    min_batch_size=min_batch_size,
                    opt_batch_size=opt_batch_size,
                    max_workspace_size=max_workspace_size,
                    data_file_name=cal_data_file,
                    calib_json_file=cal_json_file,
                    calibration_images_dir=cal_image_dir,
                    calibration_cache=cal_cache_file,
                    n_batches=n_batches,
                    batch_size=batch_size,
                    gen_ds_config=gen_ds_config,
                    force_ptq=force_ptq)
    status_logger.write(
        data=None,
        status_level=status_logging.Status.SUCCESS,
        message="Unet export job complete."
    )
status_logger.write(
data=None,
status_level=status_logging.Status.SUCCESS,
message="Unet export job complete."
)
if __name__ == "__main__":
    # Entry point: run export and mirror the outcome (success, interruption
    # or failure) into the status logger before re-raising any error.
    try:
        main()
        status_logging.get_status_logger().write(
            status_level=status_logging.Status.SUCCESS,
            message="Export finished successfully."
        )
    except (KeyboardInterrupt, SystemExit):
        status_logging.get_status_logger().write(
            message="Export was interrupted",
            verbosity_level=status_logging.Verbosity.INFO,
            status_level=status_logging.Status.FAILURE
        )
    except Exception as e:
        # Record the failure reason, then propagate so the process exits non-zero.
        status_logging.get_status_logger().write(
            message=str(e),
            status_level=status_logging.Status.FAILURE
        )
        raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/scripts/export.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command line interface for converting COCO json to VOC images."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import logging
import os
import sys
import numpy as np
from PIL import Image
from pycocotools import mask as maskUtils
from pycocotools.coco import COCO
from skimage.measure import label, regionprops
import tensorflow as tf
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
logger = logging.getLogger(__name__)
def build_command_line_parser(parser=None):
    """Build command line parser for dataset_convert.

    Args:
        parser (argparse.ArgumentParser): Parser to extend; a new one is
            created when None.

    Returns:
        argparse.ArgumentParser: Parser with the dataset_convert options.
    """
    if parser is None:
        parser = argparse.ArgumentParser(
            prog='dataset_converter',
            description='Convert COCO json to VOC images.'
        )
    parser.add_argument('-f', '--coco_file', required=True,
                        help='Path to COCO json file.')
    parser.add_argument('-n', '--num_files', type=int, default=None,
                        required=False,
                        help='Number of images to convert from COCO json to VOC.'
                             'These will be first N COCO records ')
    parser.add_argument("-r", "--results_dir", type=str, required=True,
                        help="Path to the results directory where the VOC images are saved.")
    return parser
def anns_to_seg(anns, coco_instance, category_ids, s_logger, skipped_annotations,
                log_list):
    """
    Convert COCO annotations of one image to a PASCAL-VOC style class map.

    Args:
        anns (list): COCO annotations as returned by 'coco.loadAnns'.
        coco_instance (class): pycocotools COCO instance.
        category_ids (list): Accepted label ids for the different classes.
        s_logger (class): Status logger instance.
        skipped_annotations (int): Running count of skipped annotations.
        log_list (list): Running list of logged warning records.

    Returns:
        Tuple of (2D class-id segmentation array, image file name,
        updated skipped-annotation count, updated log list).
    """
    image_info = coco_instance.loadImgs(anns[0]['image_id'])[0]
    height = image_info['height']
    width = image_info['width']
    class_seg = np.zeros((height, width))
    instance_seg = np.zeros((height, width))
    id_seg = np.zeros((height, width))
    masks, anns, skipped_annotations, log_list = anns_to_mask(
        anns, height, width, category_ids, s_logger, skipped_annotations,
        log_list)
    for idx, mask in enumerate(masks):
        ann = anns[idx]
        # Earlier (lower-index) annotations win wherever masks overlap.
        class_seg = np.where(class_seg > 0, class_seg, mask * ann['category_id'])
        instance_seg = np.where(instance_seg > 0, instance_seg, mask * (idx + 1))
        id_seg = np.where(id_seg > 0, id_seg, mask * ann['id'])
    return class_seg, image_info["file_name"], skipped_annotations, log_list
def ann_to_RLE(ann, h, w):
    """
    Convert annotation which can be polygons, uncompressed RLE to RLE.

    Args:
        ann (Dict): Dictionary with annotation details; must contain a
            'segmentation' field.
        h (int): height of input image
        w (int): width of input image

    Returns:
        COCO run-length encoding (dict) of the annotation's binary mask.

    Raises:
        ValueError: If the segmentation field is in an unrecognized format.
    """
    # Use .get so a missing image_id still produces a readable assert message
    # instead of a KeyError.
    image_id = ann.get("image_id")
    assert('segmentation' in ann), "Segmentation field is absent in the" \
        "COCO json file for image id: {}.".format(image_id)
    segm = ann['segmentation']
    if isinstance(segm, list):
        # polygon -- a single object might consist of multiple parts
        # we merge all parts into one mask rle code
        rles = maskUtils.frPyObjects(segm, h, w)
        rle = maskUtils.merge(rles)
    elif 'counts' in segm:
        if isinstance(segm['counts'], list):
            # uncompressed RLE
            rle = maskUtils.frPyObjects(segm, h, w)
        else:
            # Already a compressed RLE; return as-is.
            rle = segm
    else:
        raise ValueError('Please check the segmentation format.')
    return rle
def anns_to_mask(anns, h, w, category_ids, s_logger, skipped_annotations, log_list):
    """
    Convert annotations which can be polygons, uncompressed RLE, or RLE to binary masks.

    Malformed annotations (missing fields, unsupported category id, or
    out-of-bounds segmentation regions) are logged and counted.

    Args:
        anns (list): COCO annotation dictionaries for one image.
        h (int): Height of the input image.
        w (int): Width of the input image.
        category_ids (list): Accepted category (class) ids.
        s_logger (class): Status logger instance.
        skipped_annotations (int): Running count of skipped annotations.
        log_list (list): Running list of per-annotation warning records.

    Returns:
        masks(list): A list of binary masks (each a numpy 2D array) of all the annotations in anns
        anns(list): List of annotations
        skipped_annotations(int): Updated skipped-annotation count.
        log_list(list): Updated warning records.
    """
    # NOTE(review): when an annotation is skipped below, `masks` ends up
    # shorter than `anns` while callers index them in parallel — confirm
    # whether the returned `anns` should be filtered to match `masks`.
    masks = []
    for ann in anns:
        ann_error = {}
        if len(ann) > 0:
            if not ('image_id' in ann.keys()):
                logging.warning("image_id field is absent in the COCO json file.")
                s_logger.write(
                    message="image_id field is absent in the COCO json file.",
                    status_level=status_logging.Verbosity.WARNING)
                skipped_annotations += 1
                ann_error["error"] = "image_id field is absent in the COCO json file."
                ann_error["image_id"] = ann["image_id"]
                ann_error["annotation_id"] = ann["id"]
                log_list.append(ann_error)
                continue
            image_id = ann["image_id"]
            if not ('segmentation' in ann.keys()):
                logging.warning("Segmentation field is absent in the COCO"
                                "json file for image id: {}.".format(image_id))
                s_logger.write(
                    message="Segmentation field is absent in the COCO"
                            "json file for image id: {}.".format(image_id),
                    status_level=status_logging.Verbosity.WARNING)
                ann_error["error"] = "Segmentation field is absent in the COCO file."
                ann_error["image_id"] = ann["image_id"]
                ann_error["annotation_id"] = ann["id"]
                log_list.append(ann_error)
                skipped_annotations += 1
                continue
            # Check if the assigned category id is in the accepted list of category ids
            if not (ann["category_id"] in category_ids):
                logging.warning("Skipping annotation_id:{} in image_id:{}, as the category_id:{}"
                                "is not in supported category_ids:{}".format(ann["id"],
                                                                             ann["image_id"],
                                                                             ann["category_id"],
                                                                             category_ids))
                s_logger.write(
                    message="Skipping annotation_id:{} in image_id:{}, as the category_id:{}"
                            "is not in supported category_ids:{}".format(ann["id"],
                                                                         ann["image_id"],
                                                                         ann["category_id"],
                                                                         category_ids),
                    status_level=status_logging.Verbosity.WARNING)
                ann_error["error"] = "The category id provided is not in supported" \
                                     "category id's: {}.".format(category_ids)
                ann_error["image_id"] = ann["image_id"]
                ann_error["category_id"] = ann["category_id"]
                ann_error["annotation_id"] = ann["id"]
                log_list.append(ann_error)
                skipped_annotations += 1
                continue
            # Decode the annotation to a binary mask and split it into
            # connected regions to sanity-check each region's extent.
            rle = ann_to_RLE(ann, h, w)
            m = maskUtils.decode(rle)
            label_tmp = label(m)
            props = regionprops(label_tmp)
            for prop in props:
                # Get the tightest bounding box of the binary mask
                x1, y1, x2, y2 = prop.bbox[1], prop.bbox[0], prop.bbox[3], prop.bbox[2]
                # Check the boundary conditions for the segmentation tight box
                if not (x1 < x2 <= w and y1 < y2 <= h):
                    logging.warning("Skipping annotation_id:{} in image_id:{},"
                                    "as the segmentation map of "
                                    "is out of bounds or faulty.".format(ann["image_id"],
                                                                         ann["id"]))
                    s_logger.write(
                        message="Skipping annotation_id:{} in image_id:{},"
                                "as the segmentation map of "
                                "is out of bounds or faulty.".format(ann["image_id"],
                                                                     ann["id"]),
                        status_level=status_logging.Verbosity.WARNING)
                    ann_error["error"] = "The segmentation map is out of bounds or faulty."
                    ann_error["image_id"] = ann["image_id"]
                    ann_error["annotation_id"] = ann["id"]
                    log_list.append(ann_error)
                    skipped_annotations += 1
                    # NOTE(review): this `continue` only skips the rest of the
                    # region loop body (which is empty); the mask is still
                    # appended below — confirm whether the whole annotation
                    # should be dropped instead.
                    continue
            masks.append(m)
    return masks, anns, skipped_annotations, log_list
def coco2voc(anns_file, target_folder, n=None, s_logger=None):
    """Function to convert COCO json file to VOC images.

    Args:
        anns_file (str): Path to the COCO annotation json file.
        target_folder (str): Directory where the VOC-style PNG class masks
            and the skipped-annotation log are written.
        n (int): Number of images to convert; all images when None.
        s_logger (logger class): Status logger for progress and warnings.
    """
    skipped_annotations, log_list = 0, []
    skipped_images = 0
    coco_instance = COCO(anns_file)
    coco_imgs = coco_instance.imgs
    super_categories = coco_instance.cats
    category_ids = []
    for sc in super_categories:
        category_ids.append(sc)
    # Validate / clamp the requested image count.
    if n is None:
        n = len(coco_imgs)
    else:
        if not isinstance(n, int):
            s_logger.write(message="N must be int.", status_level=status_logging.Status.FAILURE)
            raise TypeError("N must be set as an int.")
        if n <= 0:
            s_logger.write(message="N must be greater than 0.",
                           status_level=status_logging.Status.FAILURE)
            raise ValueError("N must be greater than 0.")
        if n > len(coco_imgs):
            s_logger.write(
                message="N must be less than or equal to total number of images"
                        "in the COCO json file."
                        "Setting the N to total number of images in the coco json.",
                status_level=status_logging.Verbosity.WARNING)
            n = min(n, len(coco_imgs))
    logger.info("Number of images that are going to be converted {}".format(n))
    s_logger.write(message="Number of images that are going to be converted {}".format(n))
    # Some images may not have coco object, hence we need to account
    # for that to count total images saved. So counter to count the total
    # number of images actually saved.
    img_cntr = 0
    for _, img in enumerate(coco_imgs):
        img_error = {}
        anns_ids = coco_instance.getAnnIds(img)
        anns = coco_instance.loadAnns(anns_ids)
        if not anns:
            logging.warning("Skipping image {} that does not have"
                            " coco annotation".format(img))
            s_logger.write(
                message="Skipping image {} that does not have"
                        " coco annotation".format(img),
                status_level=status_logging.Verbosity.WARNING)
            skipped_images += 1
            img_error["error"] = "Image does not have annotation field defined."
            img_error["image_id"] = img
            log_list.append(img_error)
            continue
        class_seg, fn, skipped_annotations, log_list = anns_to_seg(
            anns, coco_instance, category_ids, s_logger, skipped_annotations,
            log_list)
        # Name the output PNG after the image's base file name.
        img_name = fn.split("/")[-1]
        img_name = img_name.split(".")[0]
        save_img_name = os.path.join(target_folder, img_name+".png")
        img_cntr += 1
        # NOTE(review): "L" mode stores 8-bit pixels — class ids above 255
        # would wrap; assumes class ids fit in one byte. Confirm.
        Image.fromarray(class_seg).convert("L").save(save_img_name)
        if img_cntr >= n:
            break
    # Logging the buggy anotations and images
    logging.info("The total number of skipped annotations are {}".format(skipped_annotations))
    logging.info("The total number of skipped images are {}".format(skipped_images))
    log_file = os.path.join(target_folder, "skipped_annotations_log.json")
    try:
        with open(log_file, "w") as final:
            json.dump(log_list, final)
    finally:
        # Always report where the skip log lives, even if writing it failed.
        logging.info("The details of faulty annotations and images that were skipped"
                     " are logged in {}".format(log_file))
def parse_command_line_args(cl_args=None):
    """Parse sys.argv arguments from commandline.

    Args:
        cl_args (list): Command line argument strings to parse.

    Returns:
        argparse.Namespace: Parsed arguments.
    """
    return build_command_line_parser().parse_args(cl_args)
def main(args=None):
    """
    Convert a COCO json to VOC dataset format.

    Sets up logging and the status logger, then runs the conversion and
    mirrors success/failure into the status file.

    Args:
        args(list): list of arguments to be parsed if called from another module.
    """
    args = parse_command_line_args(cl_args=args)
    verbosity = 'INFO'
    logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                        level=verbosity)
    # Defining the results directory.
    results_dir = args.results_dir
    if results_dir is not None:
        if not os.path.exists(results_dir):
            os.makedirs(results_dir)
    # Status updates (progress, warnings, outcome) are appended to this file.
    status_file = os.path.join(results_dir, "status.json")
    status_logging.set_status_logger(
        status_logging.StatusLogger(
            filename=status_file,
            is_master=True,
            verbosity=logger.getEffectiveLevel(),
            append=False
        )
    )
    s_logger = status_logging.get_status_logger()
    s_logger.write(
        data=None,
        message="Starting Semantic Segmentation Dataset to VOC Convert.",
        status_level=status_logging.Status.STARTED
    )
    try:
        coco2voc(args.coco_file, args.results_dir, args.num_files, s_logger)
        s_logger.write(
            status_level=status_logging.Status.SUCCESS,
            message="Conversion finished successfully."
        )
    except Exception as e:
        # Record the failure reason before propagating.
        s_logger.write(
            status_level=status_logging.Status.FAILURE,
            message="Conversion failed with following error: {}.".format(e)
        )
        raise e
if __name__ == '__main__':
    # Entry point: run the conversion; give GPU OOM a dedicated, actionable
    # message, re-raise anything else.
    try:
        main()
        status_logging.get_status_logger().write(
            status_level=status_logging.Status.SUCCESS,
            message="Dataset convert finished successfully."
        )
    except Exception as e:
        if type(e) == tf.errors.ResourceExhaustedError:
            logger = logging.getLogger(__name__)
            logger.error(
                "Ran out of GPU memory, please lower the batch size, use a smaller input "
                "resolution, or use a smaller backbone."
            )
            status_logging.get_status_logger().write(
                message="Ran out of GPU memory, please lower the batch size, use a smaller input "
                        "resolution, or use a smaller backbone.",
                verbosity_level=status_logging.Verbosity.INFO,
                status_level=status_logging.Status.FAILURE
            )
            sys.exit(1)
        else:
            # throw out the error as-is if they are not OOM error
            raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/scripts/dataset_convert.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Perform continuous training for Unet Object Segmentation with Images and Masks.
This code does nothing else than training. There's no validation or
inference in this code. Use separate scripts for those purposes.
Short code breakdown:
(1) Creates the Runtime_config and creates the estimator
(2) Hook up the data pipe and estimator to unet model with backbones such as
Resnet, Vanilla Unet (https://arxiv.org/abs/1505.04597)
(3) Set up losses, metrics, hooks.
(4) Perform training steps.
"""
import argparse
import json
import logging
import os
import sys
import time
import shutil
from google.protobuf.json_format import MessageToDict
import tensorflow as tf
import wandb
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.mlops.clearml import get_clearml_task
from nvidia_tao_tf1.cv.common.mlops.wandb import check_wandb_logged_in, initialize_wandb
from nvidia_tao_tf1.cv.unet.distribution import distribution
from nvidia_tao_tf1.cv.unet.dllogger.logger import JSONStreamBackend, Logger, StdOutBackend, \
Verbosity
from nvidia_tao_tf1.cv.unet.hooks.checkpoint_saver_hook import IVACheckpointSaverHook
from nvidia_tao_tf1.cv.unet.hooks.latest_checkpoint import LatestCheckpoint
from nvidia_tao_tf1.cv.unet.hooks.pretrained_restore_hook import PretrainedWeightsLoadingHook
from nvidia_tao_tf1.cv.unet.hooks.profiling_hook import ProfilingHook
from nvidia_tao_tf1.cv.unet.hooks.training_hook import TrainingHook
from nvidia_tao_tf1.cv.unet.model.build_unet_model import build_model
from nvidia_tao_tf1.cv.unet.model.build_unet_model import get_base_model_config
from nvidia_tao_tf1.cv.unet.model.build_unet_model import select_model_proto
from nvidia_tao_tf1.cv.unet.model.utilities import build_target_class_list
from nvidia_tao_tf1.cv.unet.model.utilities import (
get_custom_objs,
get_latest_tlt_model,
get_pretrained_ckpt,
get_pretrained_model_path,
get_results_dir,
get_train_class_mapping,
get_weights_dir
)
from nvidia_tao_tf1.cv.unet.model.utilities import initialize, initialize_params, save_tmp_json
from nvidia_tao_tf1.cv.unet.model.utilities import update_model_params, update_train_params
from nvidia_tao_tf1.cv.unet.spec_handler.spec_loader import load_experiment_spec
from nvidia_tao_tf1.cv.unet.utils.data_loader import Dataset
from nvidia_tao_tf1.cv.unet.utils.model_fn import unet_fn
logger = logging.getLogger(__name__)
tf.logging.set_verbosity(tf.logging.WARN)
def run_training_loop(estimator, dataset, params, unet_model,
                      profile_logger, key, pre_train_hook, warm_start):
    """Run the training loop using the estimator.

    Args:
        estimator (class): estimator object wrapped with run config parameters.
        dataset (dataset object): Dataset object from the dataloader utility.
        params (dict): Parameters to feed to Estimator.
        unet_model (keras model instance): Keras Unet Model.
        profile_logger (logger instance): Logger for throughput/latency stats.
        key (str): The key to encrypt the model.
        pre_train_hook (class): The hook used to load the pre-trained weights.
        warm_start: Warm-start settings (unused here; consumed by the estimator).
    """
    logger.debug("Running training loop.")
    status_logging.get_status_logger().write(data=None, message="Running training loop.")
    logger.info("Running for {} Epochs".format(params.epochs))
    hooks = []
    # Remaining steps when resuming from a checkpoint mid-run.
    steps_to_train = params.max_steps - params.start_step
    if steps_to_train == 0:
        # There are no more steps to be trained
        raise ValueError("Check the number of epochs mentioned in spec file should"
                         " be above 0 or if you are resuming training"
                         " the trainig has already completed for {} epochs".format(params.epochs))
    if distribution.get_distributor().is_master():
        # Pre-trained weight loading only happens on the master rank; other
        # ranks receive the variables via the broadcast hook below.
        if pre_train_hook:
            hooks.append(pre_train_hook)
    hooks.append(distribution.get_distributor().broadcast_global_variables_hook())
    hooks.append(TrainingHook(logger,
                              steps_per_epoch=params.steps_per_epoch,
                              max_epochs=params.epochs,
                              params=params,
                              log_every=params.log_summary_steps,
                              save_path=params.model_dir))
    # Checkpoints are saved every `checkpoint_interval` epochs.
    checkpoint_n_steps = params.steps_per_epoch * params.checkpoint_interval
    if distribution.get_distributor().is_master():
        # Profiling and checkpoint saving are master-rank-only duties.
        hooks.append(ProfilingHook(profile_logger,
                                   batch_size=params.batch_size,
                                   log_every=params.log_summary_steps,
                                   warmup_steps=params.warmup_steps,
                                   mode='train'))
        hooks.append(IVACheckpointSaverHook(checkpoint_dir=params.model_dir,
                                            save_secs=None,
                                            save_steps=checkpoint_n_steps,
                                            model_json=params.model_json,
                                            saver=None,
                                            checkpoint_basename="model.ckpt",
                                            steps_per_epoch=params.steps_per_epoch,
                                            scaffold=None,
                                            listeners=None,
                                            load_graph=params.load_graph
                                            ))
    estimator.train(
        input_fn=dataset.input_fn,
        steps=steps_to_train,
        hooks=hooks
    )
def train_unet(results_dir, experiment_spec, ptm, model_file,
               model_json=None, pruned_graph=False, key="None", custom_objs=None):
    """Run the training loop using the estimator.

    Args:
        results_dir (str): The path string where the trained model needs to be saved.
        experiment_spec (dict): Experiment spec proto.
        ptm (tuple): (extension, path) of the pre-trained model used as the training
            starting point; empty tuple when training from scratch.
        model_file (str): Destination path the final trained model is copied to.
        model_json (str): Optional model-structure JSON path (set for pruned models).
        pruned_graph (bool): True when the starting checkpoint came from a pruned model.
        key (str): Key to encrypt the model.
        custom_objs (dict): Custom layer objects needed to deserialize the model JSON.
    """
    # Initialize the environment
    initialize(experiment_spec)
    # Initialize Params
    params = initialize_params(experiment_spec)
    # Only the master rank creates the output directory to avoid filesystem races.
    if distribution.get_distributor().is_master():
        if not os.path.isdir(results_dir):
            os.makedirs(results_dir)
    target_classes = build_target_class_list(
        experiment_spec.dataset_config.data_class_config)
    # Persist the class-name -> train-id mapping next to the model artifacts.
    target_classes_train_mapping = get_train_class_mapping(target_classes)
    with open(os.path.join(results_dir, 'target_class_id_mapping.json'), 'w') as fp:
        json.dump(target_classes_train_mapping, fp)
    # Build run config
    model_config = select_model_proto(experiment_spec)
    unet_model = build_model(m_config=model_config,
                             target_class_names=target_classes,
                             seed=params["seed"])
    params = update_model_params(params=params, unet_model=unet_model,
                                 experiment_spec=experiment_spec, key=key,
                                 results_dir=results_dir,
                                 target_classes=target_classes,
                                 model_json=model_json,
                                 custom_objs=custom_objs,
                                 phase="train"
                                 )
    if params.enable_qat:
        # Remove model json condition to re-train pruned model with QAT
        # If load graph it is from pruned model
        # We add QDQ nodes before session is formed
        qat_on_pruned = params.load_graph
        img_height, img_width, img_channels = \
            experiment_spec.model_config.model_input_height, \
            experiment_spec.model_config.model_input_width, \
            experiment_spec.model_config.model_input_channels
        model_qat_obj = unet_model.construct_model(
            input_shape=(img_channels, img_height, img_width),
            pretrained_weights_file=params.pretrained_weights_file,
            enc_key=params.key, model_json=params.model_json,
            features=None, construct_qat=True, qat_on_pruned=qat_on_pruned)
        # Training continues from the QAT-augmented graph definition.
        model_qat_json = save_tmp_json(model_qat_obj)
        params.model_json = model_qat_json

    # Profiling output goes both to stdout and to a JSON stream file.
    backends = [StdOutBackend(Verbosity.VERBOSE)]
    backends.append(JSONStreamBackend(
        Verbosity.VERBOSE, params.model_dir+"/profile_log.txt"))
    profile_logger = Logger(backends)

    # Initialize env for AMP training
    if params.use_amp:
        os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '1'
    else:
        os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '0'
    # NOTE(review): os.getenv returns the string '0' here, which is truthy, so
    # loss scaling is enabled in both branches — confirm whether this is intended.
    if os.getenv('TF_ENABLE_AUTO_MIXED_PRECISION'):
        # Enable automatic loss scaling
        os.environ["TF_ENABLE_AUTO_MIXED_PRECISION_LOSS_SCALING"] = "1"

    # Initialize the config for multi-gpu training
    distributor = distribution.get_distributor()
    config = distributor.get_config()
    config.gpu_options.force_gpu_compatible = True
    config.intra_op_parallelism_threads = 1  # Avoid pool of Eigen threads
    config.inter_op_parallelism_threads = \
        max(2, 40 // distributor.size() - 2)
    if params.use_xla:
        config.graph_options.optimizer_options.global_jit_level = \
            tf.compat.v1.OptimizerOptions.ON_1
    # Checkpoints and summaries are driven by custom hooks, so the built-in
    # Estimator savers are disabled here.
    run_config = tf.estimator.RunConfig(
        save_summary_steps=None,
        tf_random_seed=None,
        session_config=config,
        save_checkpoints_steps=None,
        save_checkpoints_secs=None,
        keep_checkpoint_every_n_hours=None,
        log_step_count_steps=None
    )
    res_hook = LatestCheckpoint(key, params.model_dir)
    warm_start = None
    pre_train_hook = None
    if res_hook.ckpt:
        # Load the latest checkpoint in dir if the training is resumed
        skip_checkpoint_variables = None
        logger.debug("Resuming from checkpoint {}.".format(res_hook.ckpt))
        warm_start = tf.estimator.WarmStartSettings(ckpt_to_initialize_from=res_hook.ckpt,
                                                    vars_to_warm_start=[".*"])
        # Checkpoint names end in "-<number>"; recover the point to resume from.
        resuming_checkpoint = int(res_hook.ckpt.split("-")[-1])
        status_logging.get_status_logger().write(data=None, message="Resuming from checkpoint.")
        if res_hook.model_json:
            # We update json if pruned model resuming
            params.model_json = res_hook.model_json
            pruned_graph = True
    else:
        # If the user has provided a pre-trained weights path
        if ptm:
            pre_trained_weights = ptm[1]
            # We start training as a new experiment from step 0. Hence donot
            # restore the global step
            # For hdf5 checkpoint, we need to manualy develop the assignment map
            skip_checkpoint_variables = "global_step"
            # Hook initialize the session with the checkpoint to be initialized
            pre_train_hook = PretrainedWeightsLoadingHook(
                prefix="",
                checkpoint_path=pre_trained_weights,
                skip_variables_regex=skip_checkpoint_variables,
                remove_head=params.remove_head
            )
    # If the model_json is present, it is a pruned model. Check load graph is set.
    if params.model_json and pruned_graph:
        assert params.load_graph, "Load graph needs to be set for re-training of pruned model."
    if not pruned_graph:
        assert not params.load_graph, "Load graph should not be set if not \
            re-training pruned model."
    # Check if model json is available if load graph is set
    if params.load_graph:
        assert params.model_json, \
            "Load graph should be set only when you fine-tuning from a pruned model/ \
            Resuming training in phase 1 from a pruned checkpoint."
        logger.info("Retrieving model template from {}".format(params.model_json))
    dataset = Dataset(batch_size=params.batch_size,
                      fold=params.crossvalidation_idx,
                      augment=params.augment,
                      gpu_id=distributor.rank(),
                      num_gpus=distributor.size(),
                      params=params,
                      phase="train",
                      target_classes=target_classes,
                      buffer_size=params.buffer_size,
                      data_options=params.data_options,
                      filter_data=params.filter_data
                      )
    # Update params for number of epochs
    params = update_train_params(params, num_training_examples=dataset.train_size)
    if res_hook.ckpt:
        # Translate the resumed epoch count back into a global-step offset.
        params.start_step = resuming_checkpoint * params.steps_per_epoch
    estimator = tf.estimator.Estimator(
        model_fn=unet_fn,
        model_dir=params.model_dir,
        config=run_config,
        params=params,
        warm_start_from=warm_start)
    run_training_loop(estimator, dataset, params, unet_model,
                      profile_logger, key, pre_train_hook, warm_start)
    # Saving the last training step model to weights directory
    latest_tlt = get_latest_tlt_model(params.model_dir)
    logger.info("Saving the final step model to {}".format(model_file))
    if distribution.get_distributor().is_master():
        shutil.copytree(latest_tlt, model_file)
def run_experiment(config_path, results_dir, pretrained_model_file=None,
                   model_name="model", override_spec_path=None,
                   key=None, verbosity="INFO", wandb_logged_in=False):
    """
    Launch experiment that trains the model.

    NOTE: Do not change the argument names without verifying that
    cluster submission works.

    Args:
        config_path (list): List containing path to a text file containing a
            complete experiment configuration and possibly a path to a .yml file
            containing override parameter values.
        results_dir (str): Path to a folder where various training
            outputs will be written. If the folder
            does not already exist, it will be created.
        pretrained_model_file (str): Optional path to a pretrained model file. This maybe invoked
            from the CLI if needed. For now, we have disabled support to maintain
            consistency across all magnet apps.
        model_name (str): Model name to be used as a part of model file name.
        override_spec_path (str): Absolute path to yaml file which is used to
            overwrite some of the experiment spec parameters.
        key (str): Key to save and load models from tlt.
        verbosity (str): Logging verbosity among ["INFO", "DEBUG"].
        wandb_logged_in (bool): Check if wandb credentials were set.
    """
    logger.debug("Starting experiment.")
    model_path = get_weights_dir(results_dir)
    model_file = os.path.join(model_path, '%s.tlt' % model_name)
    # Mirror log output to a file in results_dir (master rank only).
    if distribution.get_distributor().is_master():
        output_file_handler = logging.FileHandler(os.path.join(results_dir, "output.log"))
        stdout_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(output_file_handler)
        logger.addHandler(stdout_handler)
    logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
                        level=verbosity)
    # Load experiment spec.
    if config_path is not None:
        # Create an experiment_pb2.Experiment object from the input file.
        logger.info("Loading experiment spec at %s.", config_path)
        # The spec in experiment_spec_path has to be complete.
        # Default spec is not merged into experiment_spec.
        experiment_spec = load_experiment_spec(
            config_path, merge_from_default=False)
    else:
        logger.info("Loading default ISBI single class experiment spec.")
        experiment_spec = load_experiment_spec()
    # Extract core model config, which might be wrapped inside a
    # TemporalModelConfig.
    model_config = get_base_model_config(experiment_spec)
    # Pretrained model can be provided either through CLI or spec. Expand and validate the path.
    assert not (pretrained_model_file and model_config.pretrained_model_file), \
        "Provide only one pretrained model file."
    custom_objs = None
    ptm = ()
    input_model_file_name = get_pretrained_model_path(pretrained_model_file)
    pre_trained_weights = None
    # Dump experiment spec to result directory.
    if distribution.get_distributor().is_master():
        with open(os.path.join(results_dir, 'experiment_spec.txt'), 'w') as f:
            f.write(str(experiment_spec))
    if input_model_file_name:
        _, ext = os.path.splitext(input_model_file_name)
        logging.info("Initializing the pre-trained weights from {}".format
                     (input_model_file_name))
        # Get the model_json here
        pre_trained_weights, model_json, pruned_graph = \
            get_pretrained_ckpt(input_model_file_name, key=key, custom_objs=custom_objs)
        # ptm carries (file extension, checkpoint path) down to train_unet.
        ptm = (ext, pre_trained_weights)
    else:
        # Assert if freeze blocks is provided only if pretrained weights are present.
        if model_config.freeze_blocks:
            raise ValueError("Freeze blocks is only possible if a pretrained model"
                             "file is provided.")
        pre_trained_weights = None
        model_json = None
        pruned_graph = False
    # Experiment-tracking integrations (W&B / ClearML) run on the master rank only.
    if distribution.get_distributor().is_master():
        if experiment_spec.training_config.HasField("visualizer"):
            visualizer_config = experiment_spec.training_config.visualizer
            if visualizer_config.HasField("wandb_config"):
                wandb_config = visualizer_config.wandb_config
                logger.info("Integrating with W&B")
                wandb_name = f"{wandb_config.name}" if wandb_config.name \
                    else "unet"
                wandb_stream_config = MessageToDict(
                    experiment_spec,
                    preserving_proto_field_name=True,
                    including_default_value_fields=True
                )
                initialize_wandb(
                    project=wandb_config.project if wandb_config.project else None,
                    entity=wandb_config.entity if wandb_config.entity else None,
                    config=wandb_stream_config,
                    notes=wandb_config.notes if wandb_config.notes else None,
                    tags=wandb_config.tags if wandb_config.tags else None,
                    sync_tensorboard=True,
                    save_code=False,
                    results_dir=results_dir,
                    wandb_logged_in=wandb_logged_in,
                    name=wandb_name
                )
            if visualizer_config.HasField("clearml_config"):
                logger.info("Integrating with clearml")
                clearml_config = visualizer_config.clearml_config
                get_clearml_task(clearml_config, "unet")
    # Update custom_objs with Internal TAO custom layers
    custom_objs = get_custom_objs(model_arch=model_config.arch)
    train_unet(results_dir, experiment_spec, ptm, model_file,
               model_json, pruned_graph, key=key, custom_objs=custom_objs)
    status_logging.get_status_logger().write(data=None, message="Unet training complete.")
    logger.debug("Experiment complete.")
    status_logging.get_status_logger().write(
        data=None,
        status_level=status_logging.Status.SUCCESS,
        message="Experiment complete."
    )
def build_command_line_parser(parser=None):
    """
    Parse command-line flags passed to the training script.

    Args:
        parser (argparse.ArgumentParser): Existing parser to extend; a new one
            is created when None.

    Returns:
        argparse.ArgumentParser: Parser with all training arguments registered.
    """
    if parser is None:
        parser = argparse.ArgumentParser(
            prog='train', description='Train a segmentation model.')
    # Timestamped default so repeated runs do not overwrite each other's outputs.
    default_experiment_path = os.path.join(os.path.expanduser('~'), 'experiments',
                                           time.strftime("drivenet_%Y%m%d_%H%M%S"))
    parser.add_argument(
        '-e',
        '--experiment_spec_file',
        type=str,
        default=None,
        help='Path to spec file. Absolute path or relative to working directory. \
            If not specified, default spec from spec_loader.py is used.'
    )
    parser.add_argument(
        '-m',
        '--pretrained_model_file',
        type=str,
        default=None,
        help='Model path to the pre-trained weights.'
    )
    parser.add_argument(
        '-r',
        '--results_dir',
        type=str,
        default=default_experiment_path,
        help='Path to a folder where experiment outputs should be written.'
    )
    parser.add_argument(
        '-n',
        '--model_name',
        type=str,
        default='model',
        help='Name of the model file. If not given, then defaults to model.tlt.'
    )
    parser.add_argument(
        '-v',
        '--verbose',
        action='store_true',
        help='Set verbosity level for the logger.'
    )
    parser.add_argument(
        '-k',
        '--key',
        default="",
        type=str,
        required=False,
        # Typo fix in user-facing help text: "snapshopts" -> "snapshots".
        help='The key to load pretrained weights and save intermediate snapshots and final model.'
    )
    return parser
def parse_command_line_args(cl_args=None):
    """Parse command-line arguments for the trainer.

    Args:
        cl_args (list): Argument list (typically sys.argv[1:]); None lets
            argparse read from sys.argv.

    Returns:
        argparse.Namespace: The parsed arguments.
    """
    return build_command_line_parser(parser=None).parse_args(cl_args)
def main(args=None):
    """Run the training process.

    Args:
        args (list): Command-line arguments; defaults to sys.argv[1:] when None.
    """
    args = parse_command_line_args(args)
    # Set up logger verbosity.
    verbosity = 'INFO'
    if args.verbose:
        verbosity = 'DEBUG'
    # Enable Horovod distributor for multi-GPU training.
    distribution.set_distributor(distribution.HorovodDistributor())
    # Check the results dir path and create
    results_dir = args.results_dir
    results_dir = get_results_dir(results_dir)
    events_dir = os.path.join(results_dir, "events")
    is_master = distribution.get_distributor().is_master()
    wandb_logged_in = False
    # Only the master rank touches the filesystem and W&B credentials.
    if is_master:
        if not os.path.exists(results_dir):
            os.makedirs(results_dir)
        if not os.path.exists(events_dir):
            os.makedirs(events_dir)
        wandb_logged_in = check_wandb_logged_in()
    # Configure tf logger verbosity.
    tf.logging.set_verbosity(tf.logging.INFO)
    status_file = os.path.join(results_dir, "status.json")
    status_logging.set_status_logger(
        status_logging.StatusLogger(
            filename=status_file,
            is_master=is_master,
            verbosity=logger.getEffectiveLevel(),
            append=True
        )
    )
    try:
        status_logging.get_status_logger().write(
            data=None,
            status_level=status_logging.Status.STARTED,
            message="Starting UNet Training job"
        )
        run_experiment(config_path=args.experiment_spec_file,
                       results_dir=args.results_dir,
                       model_name=args.model_name,
                       key=args.key,
                       pretrained_model_file=args.pretrained_model_file,
                       verbosity=verbosity,
                       wandb_logged_in=wandb_logged_in)
    except (KeyboardInterrupt, SystemExit):
        # Interruption is reported as FAILURE but not re-raised.
        logger.info("Training was interrupted.")
        status_logging.get_status_logger().write(
            message="Training was interrupted",
            verbosity_level=status_logging.Verbosity.INFO,
            status_level=status_logging.Status.FAILURE
        )
    except Exception as e:
        status_logging.get_status_logger().write(
            message=str(e),
            status_level=status_logging.Status.FAILURE
        )
        raise e
    finally:
        # Always close the W&B run, even on failure.
        if distribution.get_distributor().is_master():
            if wandb_logged_in:
                wandb.finish()


if __name__ == "__main__":
    main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/scripts/train.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MagNet pruning wrapper for Unet model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from datetime import datetime as dt
import logging
import os
import keras
from nvidia_tao_tf1.core.pruning.pruning import prune
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.utils import (
get_model_file_size,
get_num_params,
)
from nvidia_tao_tf1.cv.unet.model.model_io import load_keras_model, save_keras_model
from nvidia_tao_tf1.cv.unet.spec_handler.spec_loader import load_experiment_spec
logger = logging.getLogger(__name__)
def build_command_line_parser(parser=None):
    '''Build the command-line parser for the pruning script.

    Args:
        parser (argparse.ArgumentParser): Existing parser to extend; a new one
            is created when None.

    Returns:
        argparse.ArgumentParser: Parser with all pruning arguments registered.
    '''
    if parser is None:
        parser = argparse.ArgumentParser(description="TLT pruning script")
    parser.add_argument("-m",
                        "--model",
                        type=str,
                        help="Path to the target model for pruning",
                        required=True,
                        default=None)
    parser.add_argument("-o",
                        "--output_file",
                        type=str,
                        help="Output file path for pruned model",
                        required=True,
                        default=None)
    parser.add_argument("-e",
                        "--experiment_spec_path",
                        type=str,
                        help="Path to experiment spec file",
                        required=True)
    parser.add_argument('-k',
                        '--key',
                        required=False,
                        type=str,
                        default="",
                        help='Key to load a .tlt model')
    parser.add_argument('-n',
                        '--normalizer',
                        type=str,
                        default='max',
                        help="`max` to normalize by dividing each norm by the \
                        maximum norm within a layer; `L2` to normalize by \
                        dividing by the L2 norm of the vector comprising all \
                        kernel norms. (default: `max`)")
    parser.add_argument('-eq',
                        '--equalization_criterion',
                        type=str,
                        default='union',
                        help="Criteria to equalize the stats of inputs to an \
                        element wise op layer. Options are \
                        [arithmetic_mean, geometric_mean, union, \
                        intersection]. (default: `union`)")
    parser.add_argument("-pg",
                        "--pruning_granularity",
                        type=int,
                        help="Pruning granularity: number of filters to remove \
                        at a time. (default:8)",
                        default=8)
    parser.add_argument("-pth",
                        "--pruning_threshold",
                        type=float,
                        help="Threshold to compare normalized norm against \
                        (default:0.1)", default=0.1)
    parser.add_argument("-nf",
                        "--min_num_filters",
                        type=int,
                        # Help text corrected: the actual default is 8, not 16.
                        help="Minimum number of filters to keep per layer. \
                        (default:8)", default=8)
    parser.add_argument("-el",
                        "--excluded_layers", action='store',
                        type=str, nargs='*',
                        help="List of excluded_layers. Examples: -i item1 \
                        item2", default=[])
    parser.add_argument("--results_dir",
                        type=str,
                        default=None,
                        help="Path to the files where the logs are stored.")
    parser.add_argument("-v",
                        "--verbose",
                        action='store_true',
                        help="Include this flag in command line invocation for \
                        verbose logs.")
    return parser
def parse_command_line(args):
    """Parse the pruning script's command-line arguments.

    Args:
        args (list): Raw argument list to parse.

    Returns:
        argparse.Namespace: The parsed arguments.
    """
    cli = build_command_line_parser()
    parsed = cli.parse_args(args)
    return parsed
def run_pruning(args=None):
    """Prune an encrypted Keras model.

    Args:
        args (Namespace): Parsed command-line arguments supplying the model
            path, output file, experiment spec, key, normalizer, equalization
            criterion, pruning granularity/threshold, minimum filters,
            excluded layers, results_dir and verbosity flag.
    """
    # Set up logger verbosity.
    verbosity = 'INFO'
    if args.verbose:
        verbosity = 'DEBUG'
    results_dir = args.results_dir
    # Fall back to the output file's directory when no results dir is given.
    if not results_dir:
        results_dir = os.path.dirname(os.path.realpath(args.output_file))
    if not os.path.exists(results_dir):
        os.makedirs(results_dir)
    timestamp = int(dt.timestamp(dt.now()))
    filename = "status.json"
    # In the shared log directory, timestamp the status file so concurrent
    # jobs do not clobber each other.
    if results_dir == "/workspace/logs":
        filename = f"status_prune_{timestamp}.json"
    status_file = os.path.join(results_dir, filename)
    status_logging.set_status_logger(
        status_logging.StatusLogger(
            filename=status_file,
            is_master=True
        )
    )
    s_logger = status_logging.get_status_logger()
    # Configure the logger.
    logging.basicConfig(
        format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
        level=verbosity)
    assert args.equalization_criterion in \
        ['arithmetic_mean', 'geometric_mean', 'union', 'intersection'], \
        "Equalization criterion are [arithmetic_mean, geometric_mean, union, \
        intersection]."
    assert args.normalizer in ['L2', 'max'], \
        "normalizer options are [L2, max]."
    experiment_spec = load_experiment_spec(args.experiment_spec_path, merge_from_default=False)
    final_model, custom_objs = load_keras_model(experiment_spec,
                                                args.model,
                                                export=False,
                                                key=args.key)
    logger.info("Original unpruned Model summary: ")
    final_model.summary()
    # Printing out the loaded model summary
    # Output layers are always excluded so the network heads keep their shape.
    force_excluded_layers = []
    force_excluded_layers += final_model.output_names
    # Pruning trained model
    pruned_model = prune(
        model=final_model,
        method='min_weight',
        normalizer=args.normalizer,
        criterion='L2',
        granularity=args.pruning_granularity,
        min_num_filters=args.min_num_filters,
        threshold=args.pruning_threshold,
        equalization_criterion=args.equalization_criterion,
        excluded_layers=args.excluded_layers + force_excluded_layers)
    # Printing out pruned model summary
    logger.info("Model summary of the pruned model:")
    pruned_model.summary()
    # Ratio of remaining parameters: 1.0 means nothing was pruned.
    pruning_ratio = pruned_model.count_params() / final_model.count_params()
    logger.info("Pruning ratio (pruned model / original model): {}".format(
        pruning_ratio))
    # Save the encrypted pruned model
    save_keras_model(pruned_model, args.output_file, args.key)
    # Report KPIs (ratio, file size, parameter count) via the status logger.
    s_logger.kpi = {
        "pruning_ratio": pruning_ratio,
        "size": get_model_file_size(args.output_file),
        "param_count": get_num_params(pruned_model)
    }
    s_logger.write(
        message="Pruning ratio (pruned model / original model): {}".format(
            pruning_ratio
        )
    )
def main(args=None):
    """Wrapper function for pruning.

    Args:
        args (list): Command-line arguments; defaults to sys.argv[1:] when None.
    """
    try:
        # parse command line
        args = parse_command_line(args)
        run_pruning(args)
        status_logging.get_status_logger().write(
            status_level=status_logging.Status.SUCCESS,
            message="Pruning finished successfully."
        )
    except (KeyboardInterrupt, SystemExit):
        # Interruption is logged as FAILURE but deliberately not re-raised.
        status_logging.get_status_logger().write(
            message="Pruning was interrupted",
            verbosity_level=status_logging.Verbosity.INFO,
            status_level=status_logging.Status.FAILURE
        )
    except Exception as e:
        status_logging.get_status_logger().write(
            message=str(e),
            status_level=status_logging.Status.FAILURE
        )
        raise e


if __name__ == "__main__":
    main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/scripts/prune.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Perform Inference for the Unet Segmentation.
This code does the inference. Given the paths of the test images, it predicts
masks and dumps the visualized segmented images.
Short code breakdown:
(1) Creates the Runtime_config and creates the estimator
(2) Hook up the data pipe and estimator to unet model with backbones such as
Resnet, vanilla Unet
(3) Retrieves/ Encrypts the trained checkpoint.
(4) Performs Inference and dumps images with segmentation vis.
"""
import argparse
import json
import logging
import math
import os
import random
import cv2
import numpy as np
from PIL import Image
import tensorflow as tf
from tqdm import tqdm
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.unet.dllogger.logger import JSONStreamBackend, Logger, StdOutBackend, \
Verbosity
from nvidia_tao_tf1.cv.unet.hooks.profiling_hook import ProfilingHook
from nvidia_tao_tf1.cv.unet.model.build_unet_model import build_model
from nvidia_tao_tf1.cv.unet.model.build_unet_model import select_model_proto
from nvidia_tao_tf1.cv.unet.model.model_io import _extract_ckpt
from nvidia_tao_tf1.cv.unet.model.utilities import build_target_class_list, get_train_class_mapping
from nvidia_tao_tf1.cv.unet.model.utilities import get_custom_objs, get_pretrained_ckpt, \
update_model_params
from nvidia_tao_tf1.cv.unet.model.utilities import initialize, initialize_params, save_tmp_json
from nvidia_tao_tf1.cv.unet.spec_handler.spec_loader import load_experiment_spec
from nvidia_tao_tf1.cv.unet.utils.data_loader import Dataset
from nvidia_tao_tf1.cv.unet.utils.inference_trt import Inferencer
from nvidia_tao_tf1.cv.unet.utils.model_fn import unet_fn
logger = logging.getLogger(__name__)
tf.logging.set_verbosity(tf.logging.INFO)
def resize_with_pad(image, f_target_width=None, f_target_height=None, inter=cv2.INTER_AREA):
    """Compute the per-side padding applied by an aspect-preserving resize.

    Scales the image to fit inside (f_target_width, f_target_height) while
    keeping its aspect ratio, then returns the letterbox padding on each side.
    Note: `inter` is accepted for interface compatibility but not used here.

    Args:
        image: Array whose first two dims are (height, width).
        f_target_width: Target width after resize+pad.
        f_target_height: Target height after resize+pad.
        inter: Interpolation flag (unused).

    Returns:
        tuple: (pad_top, pad_bottom, pad_left, pad_right) in pixels.
    """
    src_h, src_w = image.shape[:2]
    # Scale factor that makes the image fit inside the target box.
    scale = max(src_w / float(f_target_width), src_h / float(f_target_height))
    scaled_h_f = src_h / scale
    scaled_w_f = src_w / scale
    scaled_h = math.floor(scaled_h_f)
    scaled_w = math.floor(scaled_w_f)
    # Split the leftover space evenly; floor keeps the extra pixel on the far side.
    pad_top = max(0, math.floor((f_target_height - scaled_h_f) / 2))
    pad_left = max(0, math.floor((f_target_width - scaled_w_f) / 2))
    pad_bottom = max(0, f_target_height - (scaled_h + pad_top))
    pad_right = max(0, f_target_width - (scaled_w + pad_left))
    return pad_top, pad_bottom, pad_left, pad_right
def get_color_id(dataset):
    """Return one deterministic (R, G, B) tuple per class in the dataset."""
    palette = []
    for class_idx in range(dataset.num_classes):
        # Seed with the class index so each class always maps to the same color.
        random.seed(class_idx)
        palette.append(tuple(random.randint(0, 255) for _ in range(3)))
    return palette
def overlay_seg_image(inp_img, seg_img, resize_padding, resize_method):
    """Blend a segmentation mask on top of the original input image.

    Args:
        inp_img: Original image as read by cv2 (H x W x C).
        seg_img: Colored segmentation mask at network output resolution.
        resize_padding: True when the network input was letterbox-padded;
            the padding is cropped off before scaling back.
        resize_method: One of 'BILINEAR', 'AREA', 'BICUBIC', 'NEAREST_NEIGHBOR'.

    Returns:
        uint8 array: 50/50 blend of the input image and the resized mask.
    """
    # Map the spec-file resize method names onto OpenCV interpolation flags.
    interp_flags = {'BILINEAR': cv2.INTER_LINEAR, 'AREA': cv2.INTER_AREA,
                    'BICUBIC': cv2.INTER_CUBIC,
                    'NEAREST_NEIGHBOR': cv2.INTER_NEAREST}
    interp = interp_flags[resize_method]
    orig_h, orig_w = inp_img.shape[:2]
    mask_h, mask_w = seg_img.shape[:2]
    if resize_padding:
        # Strip the letterbox padding before scaling back to the input size.
        pad_top, pad_bottom, pad_left, pad_right = \
            resize_with_pad(inp_img, mask_w, mask_h)
        cropped = seg_img[pad_top:(mask_h - pad_bottom), pad_left:(mask_w - pad_right)]
        seg_resized = cv2.resize(cropped, (orig_w, orig_h), interpolation=interp)
    else:
        seg_resized = cv2.resize(seg_img, (orig_w, orig_h), interpolation=interp)
    # Equal-weight blend of image and mask.
    return (inp_img / 2 + seg_resized / 2).astype('uint8')
def visualize_masks(predictions, out_dir, input_image_type, img_names, colors,
                    mode="tlt", resize_padding=True, resize_method='BILINEAR',
                    activation="softmax"):
    """The function to visualize the segmentation masks.

    Args:
        predictions: Predicted masks numpy arrays.
        out_dir: Output dir where the visualization is saved.
        input_image_type: The input type of image (color/ grayscale).
        img_names: The input image names.
        colors: Per-class (R, G, B) tuples used to paint each class id.
        mode: "tlt" or "trt"; used as an output-folder suffix and to decide
            whether predictions are already squeezed.
        resize_padding: Whether letterbox padding was used on the network input.
        resize_method: Interpolation name for mapping masks back to image size.
        activation: Network output activation ("softmax" or otherwise).
    """
    # One folder for blended overlays, one for raw label-index masks.
    vis_dir = os.path.join(out_dir, "vis_overlay"+"_"+mode)
    label_dir = os.path.join(out_dir, "mask_labels"+"_"+mode)
    if not os.path.isdir(vis_dir):
        os.makedirs(vis_dir)
    if not os.path.isdir(label_dir):
        os.makedirs(label_dir)
    idx = 0
    for p, img_name in tqdm(zip(predictions, img_names)):
        pred = p['logits']
        tags = img_name.split("/")
        fn = tags[-1]
        idx += 1
        if activation == "softmax" or mode == "trt":
            # TRT inference is squeezed too
            output_height = pred.shape[0]
            output_width = pred.shape[1]
        else:
            # Non-softmax TLT predictions keep a leading batch axis; drop it.
            output_height = pred.shape[1]
            output_width = pred.shape[2]
            pred = np.squeeze(pred, axis=0)
        if input_image_type == "grayscale":
            # Binary mask: scale {0, 1} to {0, 255} and save it directly.
            pred = pred.astype(np.uint8)*255
            img_resized = Image.fromarray(pred).resize(size=(output_width, output_height),
                                                       resample=Image.BILINEAR)
            img_resized.save(os.path.join(vis_dir, fn))
        else:
            # Paint every class id with its color, then blend over the input image.
            segmented_img = np.zeros((output_height, output_width, 3))
            for c in range(len(colors)):
                seg_arr_c = pred[:, :] == c
                segmented_img[:, :, 0] += ((seg_arr_c)*(colors[c][0])).astype('uint8')
                segmented_img[:, :, 1] += ((seg_arr_c)*(colors[c][1])).astype('uint8')
                segmented_img[:, :, 2] += ((seg_arr_c)*(colors[c][2])).astype('uint8')
            orig_image = cv2.imread(img_name)
            fused_img = overlay_seg_image(orig_image, segmented_img, resize_padding,
                                          resize_method)
            cv2.imwrite(os.path.join(vis_dir, fn), fused_img)
        # Raw label map (class indices as pixel values) for downstream tooling.
        mask_fn = "{}.png".format(os.path.splitext(fn)[0])
        cv2.imwrite(os.path.join(label_dir, mask_fn), pred)
def run_inference_tlt(dataset, params, unet_model, key,
                      output_dir, model_path):
    """Run the prediction followed by inference using the estimator.

    Args:
        dataset: Dataset object from the dataloader utility.
        params: Parameters to feed to Estimator.
        unet_model: Keras Unet Model.
        key: The key to encrypt the model.
        output_dir: The directory where the results file is saved.
        model_path: The TLT model path for inference.
    """
    backends = [StdOutBackend(Verbosity.VERBOSE)]
    backends.append(JSONStreamBackend(Verbosity.VERBOSE, output_dir+"/log.txt"))
    profile_logger = Logger(backends)
    gpu_options = tf.compat.v1.GPUOptions()
    config = tf.compat.v1.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    run_config = tf.estimator.RunConfig(
        save_summary_steps=1,
        tf_random_seed=None,
        session_config=config)
    # Decrypt/locate the checkpoint (model_json is unused here; params already
    # carry the graph definition).
    checkpoint_path, model_json = _extract_ckpt(model_path, key)
    estimator = tf.estimator.Estimator(
        model_fn=unet_fn,
        model_dir=params.model_dir,
        config=run_config,
        params=params)
    predict_steps = dataset.test_size
    hooks = None
    if params.benchmark:
        hooks = [ProfilingHook(profile_logger,
                               batch_size=params.batch_size,
                               log_every=params.log_every,
                               warmup_steps=params.warmup_steps,
                               mode="test")]
        # Benchmark mode loops over the data long enough to pass warmup.
        predict_steps = params.warmup_steps * 2 * params.batch_size
    # Repeat the dataset enough times to cover predict_steps.
    predictions = estimator.predict(
        input_fn=lambda: dataset.test_fn(
            count=math.ceil(predict_steps/dataset.test_size)),
        hooks=hooks, checkpoint_path=checkpoint_path,
    )
    img_names = dataset.get_test_image_names()
    input_image_type = dataset.input_image_type
    colors = get_color_id(dataset)
    visualize_masks(predictions, output_dir, input_image_type, img_names, colors, "tlt",
                    params.resize_padding, params.resize_method,
                    activation=params["activation"])
def run_inference_trt(model_path, experiment_spec, output_dir, dataset, params, key="tlt_encode",
                      activation="softmax"):
    """Run inference with a serialized TensorRT engine and visualize the masks.

    Args:
        model_path: Path to the TensorRT engine (.engine/.trt).
        experiment_spec: Experiment spec proto (not used in this function).
        output_dir: Folder to save the results.
        dataset: Dataset object.
        params: Parameter namespace; supplies activation and resize settings.
        key: Key to decrypt the model (not used for plain engine files here).
        activation: Output activation name.
            NOTE(review): this argument is ignored — params["activation"] is
            what is actually forwarded; confirm intent.
    """
    inferencer = Inferencer(keras_model=None, trt_engine_path=model_path,
                            dataset=dataset, batch_size=dataset._batch_size,
                            activation=params["activation"])
    predictions, img_names = inferencer.infer(dataset.image_names_list)
    input_image_type = dataset.input_image_type
    colors = get_color_id(dataset)
    visualize_masks(predictions, output_dir, input_image_type, img_names, colors, mode="trt",
                    resize_method=params.resize_method,
                    resize_padding=params.resize_padding,
                    activation=params["activation"])
def infer_unet(model_path, experiment_spec, output_dir, key=None):
    """Dispatch inference to the TLT or TensorRT path based on the model extension.

    Args:
        model_path (str): Path to the trained model (.tlt or a .engine/.trt engine).
        experiment_spec: Experiment spec proto.
        output_dir (str): Folder to save the visualizations and label masks.
        key (str): Key to decrypt the model.
    """
    # Initialize the environment
    initialize(experiment_spec)
    # Initialize Params
    params = initialize_params(experiment_spec)
    target_classes = build_target_class_list(
        experiment_spec.dataset_config.data_class_config)
    target_classes_train_mapping = get_train_class_mapping(target_classes)
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    # Persist the class-name -> train-id mapping next to the results.
    with open(os.path.join(output_dir, 'target_class_id_mapping.json'), 'w') as fp:
        json.dump(target_classes_train_mapping, fp)
    # The file extension decides whether we run the estimator or a TRT engine.
    model_ext = os.path.splitext(model_path)[1]
    # Build run config
    model_config = select_model_proto(experiment_spec)
    unet_model = build_model(m_config=model_config,
                             target_class_names=target_classes)
    model_dir = os.path.abspath(os.path.join(model_path, os.pardir))
    custom_objs = None
    model_json = None
    # Update custom_objs with Internal TAO custom layers
    custom_objs = get_custom_objs(model_arch=model_config.arch)
    params = update_model_params(params=params, unet_model=unet_model,
                                 experiment_spec=experiment_spec,
                                 key=key, target_classes=target_classes,
                                 results_dir=model_dir,
                                 phase="test",
                                 custom_objs=custom_objs,
                                 model_json=model_json)
    if params.enable_qat and not params.load_graph:
        # We add QDQ nodes before session is formed
        img_height, img_width, img_channels = \
            experiment_spec.model_config.model_input_height, \
            experiment_spec.model_config.model_input_width, \
            experiment_spec.model_config.model_input_channels
        model_qat_json = unet_model.construct_model(
            input_shape=(img_channels, img_height, img_width),
            pretrained_weights_file=params.pretrained_weights_file,
            enc_key=params.key, model_json=params.model_json,
            features=None, construct_qat=True)
        # Inference continues from the QAT-augmented graph definition.
        model_qat_json = save_tmp_json(model_qat_json)
        params.model_json = model_qat_json
    dataset = Dataset(
        batch_size=params.batch_size,
        fold=params.crossvalidation_idx,
        augment=params.augment,
        params=params,
        phase="test",
        target_classes=target_classes)
    if model_ext in ['.tlt', '']:
        run_inference_tlt(dataset, params, unet_model,
                          key, output_dir, model_path)
    elif model_ext in ['.engine', '.trt']:
        run_inference_trt(model_path, experiment_spec, output_dir, dataset, params,
                          key=key, activation=params.activation)
    else:
        raise ValueError("Model extension needs to be either .engine or .trt.")
def run_experiment(model_path, config_path, output_dir,
                   override_spec_path=None, key=None):
    """Launch an inference experiment.

    NOTE: Do not change the argument names without verifying that cluster
    submission works.

    Args:
        model_path (str): Path to the model used for inference.
        config_path (list): Path to a text file holding the complete experiment
            configuration, optionally alongside a .yml file with override values.
        output_dir (str): Folder where inference outputs are written. Created
            if it does not already exist.
        override_spec_path (str): Absolute path to a yaml file overriding some
            experiment spec parameters.
        key (str): Key used to save and load models from tlt.
    """
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Route all status updates to a JSON status file inside the results folder.
    status_logging.set_status_logger(
        status_logging.StatusLogger(
            filename=os.path.join(output_dir, "status.json"),
            is_master=True,
            append=False,
            verbosity=logger.getEffectiveLevel()
        )
    )
    status_logging.get_status_logger().write(
        data=None,
        status_level=status_logging.Status.STARTED,
        message="Starting UNet Inference"
    )
    logger.debug("Starting experiment.")
    if config_path is None:
        # Fall back to the default single-class ISBI spec.
        logger.info("Loading default ISBI single class experiment spec.")
        experiment_spec = load_experiment_spec()
    else:
        # The spec at config_path must be complete; the default spec is not
        # merged into it.
        logger.info("Loading experiment spec at %s.", config_path)
        experiment_spec = load_experiment_spec(
            config_path, merge_from_default=False)
    infer_unet(model_path, experiment_spec, output_dir, key=key)
    logger.debug("Experiment complete.")
def build_command_line_parser(parser=None):
    """
    Build (or extend) the argument parser for the inference script.

    Args:
        parser (argparse.ArgumentParser): Existing parser to extend; a new
            one is created when None.

    Returns:
        argparse.ArgumentParser: Parser with all inference flags registered.
    """
    if parser is None:
        parser = argparse.ArgumentParser(
            prog='inference', description='Inference of segmentation model.')
    default_experiment_path = os.path.join(os.path.expanduser('~'), 'experiments')
    parser.add_argument(
        '-e',
        '--experiment_spec',
        type=str,
        default=None,
        help='Path to spec file. Absolute path or relative to working directory. \
            If not specified, default spec from spec_loader.py is used.'
    )
    parser.add_argument(
        '-o',
        '--results_dir',
        type=str,
        default=default_experiment_path,
        help='Path to a folder where experiment annotated outputs are saved.'
    )
    parser.add_argument(
        '-m',
        '--model_path',
        type=str,
        default=default_experiment_path,
        help='Path to a folder from where the model should be taken for inference.'
    )
    parser.add_argument(
        '-v',
        '--verbose',
        action='store_true',
        help='Set verbosity level for the logger.'
    )
    parser.add_argument(
        '-k',
        '--key',
        default="",
        type=str,
        required=False,
        help='The key to load the model provided for inference.'
    )
    # Dummy arguments for Deploy: accepted but hidden from --help so the
    # same CLI works when driven by the deploy tooling.
    parser.add_argument(
        '-i',
        '--image_dir',
        type=str,
        required=False,
        default=None,
        help=argparse.SUPPRESS
    )
    parser.add_argument(
        '-b',
        '--batch_size',
        type=int,
        required=False,
        default=1,
        help=argparse.SUPPRESS
    )
    return parser
def parse_command_line_args(cl_args=None):
    """Parse command line arguments for the inference entry point.

    Args:
        cl_args (list): Argument list, typically sys.argv[1:].

    Returns:
        argparse.Namespace: Parsed arguments.
    """
    return build_command_line_parser(parser=None).parse_args(cl_args)
def main(args=None):
    """Run the Inference process."""
    parsed = parse_command_line_args(args)
    # The -v/--verbose flag upgrades logging from INFO to DEBUG.
    log_level = 'DEBUG' if parsed.verbose else 'INFO'
    logging.basicConfig(
        format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
        level=log_level)
    # Configure tf logger verbosity.
    tf.logging.set_verbosity(tf.logging.INFO)
    run_experiment(config_path=parsed.experiment_spec,
                   model_path=parsed.model_path,
                   output_dir=parsed.results_dir,
                   key=parsed.key)
if __name__ == "__main__":
    try:
        main()
        status_logging.get_status_logger().write(
            status_level=status_logging.Status.SUCCESS,
            message="Inference finished successfully."
        )
    except (KeyboardInterrupt, SystemExit):
        # Deliberate interruption is reported as FAILURE but not re-raised.
        status_logging.get_status_logger().write(
            message="Inference was interrupted",
            verbosity_level=status_logging.Verbosity.INFO,
            status_level=status_logging.Status.FAILURE
        )
    except Exception as e:
        status_logging.get_status_logger().write(
            message=str(e),
            status_level=status_logging.Status.FAILURE
        )
        # Bare `raise` re-raises with the original traceback; `raise e`
        # would rebuild the traceback from this frame.
        raise
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Perform Evaluation for the Unet Segmentation.
This code does the evaluation. Given the paths of th evalidation images and the
masks , it prints the miou, f1 score, recall, avg score metrics.
Short code breakdown:
(1) Creates the Runtime_config and creates the estimator
(2) Hook up the data pipe and estimator to unet model with backbones such as
Resnet, vanilla Unet
(3) Retrieves/ Encrypts the trained checkpoint.
(4) Performs Evaluation and prints the semantic segmentation metric and dumps
it a json file.
"""
import argparse
import collections
import json
import logging
import math
import os
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.utils import get_model_file_size
from nvidia_tao_tf1.cv.unet.dllogger.logger import JSONStreamBackend, Logger, StdOutBackend, \
Verbosity
from nvidia_tao_tf1.cv.unet.hooks.profiling_hook import ProfilingHook
from nvidia_tao_tf1.cv.unet.model.build_unet_model import build_model
from nvidia_tao_tf1.cv.unet.model.build_unet_model import select_model_proto
from nvidia_tao_tf1.cv.unet.model.model_io import _extract_ckpt
from nvidia_tao_tf1.cv.unet.model.utilities import build_target_class_list, get_train_class_mapping
from nvidia_tao_tf1.cv.unet.model.utilities import get_custom_objs, get_pretrained_ckpt, \
update_model_params
from nvidia_tao_tf1.cv.unet.model.utilities import initialize, initialize_params, save_tmp_json
from nvidia_tao_tf1.cv.unet.spec_handler.spec_loader import load_experiment_spec
from nvidia_tao_tf1.cv.unet.utils.data_loader import Dataset
from nvidia_tao_tf1.cv.unet.utils.evaluate_trt import Evaluator
from nvidia_tao_tf1.cv.unet.utils.model_fn import unet_fn
logger = logging.getLogger(__name__)
tf.logging.set_verbosity(tf.logging.INFO)
def getScoreAverage(scoreList):
    """Compute the average of the non-NaN scores in scoreList.

    Args:
        scoreList (iterable): Per-class scores; NaN entries are skipped.

    Returns:
        float: Mean of the valid scores, or NaN when every score is NaN.
    """
    valid = [score for score in scoreList if not math.isnan(score)]
    if not valid:
        return float('nan')
    return sum(valid) / len(valid)
def compute_metrics_masks(predictions_it, dataset, target_classes, params):
    """Compute semantic-segmentation metrics from per-image confusion matrices.

    Args:
        predictions_it (iterable): Iterable of prediction dicts; each carries a
            per-image confusion matrix under the "conf_matrix" key.
        dataset: Dataset object from the dataloader utility (kept for interface
            compatibility; not read here).
        target_classes (list): Target class objects used to map train ids to
            class names.
        params: Parameters namespace; num_conf_mat_classes gives the confusion
            matrix dimension.

    Returns:
        dict: Mean recall/precision/F1/IOU plus a per-class results dict.
    """
    num_classes = params.num_conf_mat_classes
    conf_mat = np.zeros([num_classes, num_classes], dtype=np.float32)
    # Accumulate the global confusion matrix over all images.
    for p in tqdm(predictions_it):
        # np.asarray replaces the deprecated np.matrix; the element-wise
        # accumulation is identical.
        conf_mat += np.asarray(p["conf_matrix"], dtype=np.float32)
    metrices = {}
    perclass_tp = np.diagonal(conf_mat).astype(np.float32)
    perclass_fp = conf_mat.sum(axis=0) - perclass_tp
    perclass_fn = conf_mat.sum(axis=1) - perclass_tp
    # Divisions can yield NaN for classes absent from both prediction and
    # ground truth; getScoreAverage skips NaNs when averaging.
    iou_per_class = perclass_tp / (perclass_fp + perclass_tp + perclass_fn)
    precision_per_class = perclass_tp / (perclass_fp + perclass_tp)
    recall_per_class = perclass_tp / (perclass_tp + perclass_fn)
    train_id_name_mapping = get_train_class_mapping(target_classes)
    f1_per_class = []
    final_results_dic = {}
    for num_class in range(num_classes):
        name_class = "/".join(train_id_name_mapping[num_class])
        per_class_metric = {}
        prec = precision_per_class[num_class]
        rec = recall_per_class[num_class]
        iou = iou_per_class[num_class]
        f1 = (2 * prec * rec) / float(prec + rec)
        f1_per_class.append(f1)
        per_class_metric["precision"] = prec
        per_class_metric["Recall"] = rec
        per_class_metric["F1 Score"] = f1
        per_class_metric["iou"] = iou
        final_results_dic[name_class] = per_class_metric
    metrices["rec"] = getScoreAverage(recall_per_class)
    metrices["prec"] = getScoreAverage(precision_per_class)
    metrices["fmes"] = getScoreAverage(f1_per_class)
    metrices["mean_iou_index"] = getScoreAverage(iou_per_class)
    metrices["results_dic"] = final_results_dic
    return metrices
def print_compute_metrics(dataset, predictions_it, output_dir, target_classes, params,
                          mode="tlt"):
    """Compute evaluation metrics, persist them to json/status log and print them.

    Args:
        dataset: Dataset object from the dataloader utility.
        predictions_it (iterable): Iterable of prediction dicts, each carrying
            a per-image confusion matrix.
        output_dir (str): Directory where the results_<mode>.json file is saved.
        target_classes (list): Target class objects (label id / train id / name).
        params: Parameters mapping; params["model_size"] is reported as a KPI.
        mode (str): Suffix for the results file name ("tlt" or "trt").
    """
    metrices = compute_metrics_masks(predictions_it, dataset, target_classes, params)
    recall_str = "Recall : " + str(metrices["rec"])
    precision_str = "Precision: " + str(metrices["prec"])
    f1_score_str = "F1 score: " + str(metrices["fmes"])
    mean_iou_str = "Mean IOU: " + str(metrices["mean_iou_index"])
    results_str = [recall_str, precision_str, f1_score_str, mean_iou_str]
    results_file = os.path.join(output_dir, "results_"+mode+".json")
    metrices_str_categorical = {}
    metrices_str = collections.defaultdict(dict)
    # Stringify every per-class metric so the status payload is serializable
    # (the raw values are numpy scalars).
    for k, v in metrices["results_dic"].items():
        class_name = str(k)
        for metric_type, val in v.items():
            metrices_str[str(metric_type)][class_name] = str(val)
    metrices_str_categorical["categorical"] = metrices_str
    # writing the results to a file
    with open(results_file, 'w') as fp:
        json.dump(str(metrices["results_dic"]), fp)
    s_logger = status_logging.get_status_logger()
    # Headline metrics exposed as KPIs in the status log.
    s_logger.kpi = {
        "Mean IOU": metrices["mean_iou_index"],
        "Average precision": metrices["prec"],
        "Average recall": metrices["rec"],
        "F1 score": metrices["fmes"],
        "model size": params["model_size"],
    }
    s_logger.write(
        data=metrices_str_categorical,
        status_level=status_logging.Status.RUNNING)
    for result in results_str:
        # This will print the results to the stdout
        print(result+"\n")
def run_evaluate_tlt(dataset, params, unet_model, load_graph, key,
                     output_dir, model_path, target_classes):
    """Run prediction on the validation set via the estimator and report metrics.

    Args:
        dataset: Dataset object from the dataloader utility.
        params: Parameters mapping fed to the Estimator.
        unet_model: Keras Unet model (used through the estimator model_fn).
        load_graph (bool): Whether to load the pruned/re-trained model graph.
        key (str): Key to decrypt the model.
        output_dir (str): Directory where the results and log files are saved.
        model_path (str): Path to the .tlt model/checkpoint under evaluation.
        target_classes (list): Target class objects (label id / train id / name).
    """
    backends = [StdOutBackend(Verbosity.VERBOSE)]
    backends.append(JSONStreamBackend(Verbosity.VERBOSE, output_dir+"/log.txt"))
    profile_logger = Logger(backends)
    gpu_options = tf.compat.v1.GPUOptions()
    config = tf.compat.v1.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)
    run_config = tf.estimator.RunConfig(
        save_summary_steps=1,
        tf_random_seed=None,
        session_config=config)
    if load_graph:
        # Pruned/re-trained model: fetch the checkpoint plus its model json.
        checkpoint_path, model_json, _ = get_pretrained_ckpt(model_path, key)
    else:
        checkpoint_path, model_json = _extract_ckpt(model_path, key)
    if params.load_graph:
        # Need to set it True if directly loading pruned tlt model
        assert model_json, \
            "Load graph should be set only when inferring a pruned/re-trained model."
    params["model_json"] = model_json
    estimator = tf.estimator.Estimator(
        model_fn=unet_fn,
        model_dir=params.model_dir,
        config=run_config,
        params=params)
    hooks = None
    if params.benchmark:
        # Optional throughput profiling during evaluation.
        hooks = [ProfilingHook(profile_logger,
                               batch_size=params.batch_size,
                               log_every=params.log_every,
                               warmup_steps=params.warmup_steps,
                               mode="test")]
    predictions = estimator.predict(
        input_fn=lambda: dataset.eval_fn(
            count=1), hooks=hooks, checkpoint_path=checkpoint_path,
    )
    logger.info("Starting Evaluation.")
    print_compute_metrics(dataset, predictions, output_dir, target_classes, params,
                          mode="tlt")
def run_evaluate_trt(model_path, experiment_spec, output_dir, dataset,
                     target_classes, params, key=None):
    """Run evaluation of a TensorRT engine and report metrics.

    Args:
        model_path (str): Path to the .engine/.trt file to evaluate.
        experiment_spec: Experiment spec proto.
        output_dir (str): Folder to save the results text file.
        dataset: Dataset object.
        target_classes (list): Target class objects (label id / train id / name).
        params: Parameters namespace; activation and num_conf_mat_classes are
            forwarded to the TRT Evaluator.
        key (str): Key to decrypt the model.
    """
    num_conf_mat_classes = params.num_conf_mat_classes
    activation = params.activation
    evaluator = Evaluator(keras_model=None, trt_engine_path=model_path,
                          dataset=dataset, batch_size=dataset._batch_size,
                          activation=activation, num_conf_mat_classes=num_conf_mat_classes)
    predictions = evaluator.evaluate(dataset.image_names_list, dataset.masks_names_list)
    print_compute_metrics(dataset, predictions, output_dir, target_classes,
                          params, mode="trt")
def evaluate_unet(model_path, experiment_spec, output_dir, key=None):
    """Run evaluation for a .tlt model or a TensorRT engine.

    Args:
        model_path (str): Path to the model to evaluate — a .tlt checkpoint
            (or extensionless path) or an .engine/.trt file.
        experiment_spec: Experiment spec proto.
        output_dir (str): Folder to save the results text file.
        key (str): Key to decrypt the model.

    Raises:
        ValueError: If the model extension is not .tlt/'' or .engine/.trt.
    """
    # Initialize the environment
    initialize(experiment_spec)
    # Initialize Params
    params = initialize_params(experiment_spec)
    target_classes = build_target_class_list(
        experiment_spec.dataset_config.data_class_config)
    # Build run config
    model_config = select_model_proto(experiment_spec)
    unet_model = build_model(m_config=model_config,
                             target_class_names=target_classes)
    model_dir = os.path.abspath(os.path.join(model_path, os.pardir))
    model_ext = os.path.splitext(model_path)[1]
    custom_objs = None
    model_json = None
    # Update custom_objs with Internal TAO custom layers
    custom_objs = get_custom_objs(model_arch=model_config.arch)
    params = update_model_params(params=params, unet_model=unet_model,
                                 experiment_spec=experiment_spec,
                                 key=key, target_classes=target_classes,
                                 results_dir=model_dir,
                                 phase="val",
                                 model_json=model_json,
                                 custom_objs=custom_objs)
    params["model_size"] = get_model_file_size(model_path)
    if params.enable_qat and not params.load_graph:
        # Eval is done by using model_json of pruned ckpt if load_graph is set
        # QAT nodes are added only for non-pruned graph
        # We add QDQ nodes before session is formed
        img_height, img_width, img_channels = \
            experiment_spec.model_config.model_input_height, \
            experiment_spec.model_config.model_input_width, \
            experiment_spec.model_config.model_input_channels
        model_qat_json = unet_model.construct_model(
            input_shape=(img_channels, img_height, img_width),
            pretrained_weights_file=params.pretrained_weights_file,
            enc_key=params.key, model_json=params.model_json,
            features=None, construct_qat=True)
        model_qat_json = save_tmp_json(model_qat_json)
        params.model_json = model_qat_json
    dataset = Dataset(
        batch_size=params.batch_size,
        fold=params.crossvalidation_idx,
        augment=params.augment,
        params=params,
        phase="val",
        target_classes=target_classes)
    # Dispatch on the model file extension.
    if model_ext in ['.tlt', '']:
        run_evaluate_tlt(dataset, params, unet_model, params.load_graph,
                         key, output_dir, model_path, target_classes)
    elif model_ext in ['.engine', '.trt']:
        run_evaluate_trt(model_path, experiment_spec, output_dir, dataset,
                         target_classes, params, key=key)
    else:
        raise ValueError("Model extension needs to be either .engine or .trt.")
def run_experiment(model_path, config_path, output_dir,
                   override_spec_path=None, key=None):
    """Launch an evaluation experiment.

    NOTE: Do not change the argument names without verifying that cluster
    submission works.

    Args:
        model_path (str): Path that contains the latest checkpoint to evaluate.
        config_path (list): Path to a text file holding the complete experiment
            configuration, optionally alongside a .yml file with override values.
        output_dir (str): Folder where evaluation outputs are written. Created
            if it does not already exist.
        override_spec_path (str): Absolute path to a yaml file overriding some
            experiment spec parameters.
        key (str): Key used to save and load models from tlt.
    """
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Route all status updates to a JSON status file inside the results folder.
    status_logging.set_status_logger(
        status_logging.StatusLogger(
            filename=os.path.join(output_dir, "status.json"),
            is_master=True,
            append=False,
            verbosity=logger.getEffectiveLevel()
        )
    )
    status_logging.get_status_logger().write(
        data=None,
        status_level=status_logging.Status.STARTED,
        message="Starting UNet Evaluation"
    )
    logger.debug("Starting experiment.")
    if config_path is None:
        # Fall back to the default single-class ISBI spec.
        logger.info("Loading default ISBI single class experiment spec.")
        experiment_spec = load_experiment_spec()
    else:
        # The spec at config_path must be complete; the default spec is not
        # merged into it.
        logger.info("Loading experiment spec at %s.", config_path)
        experiment_spec = load_experiment_spec(
            config_path, merge_from_default=False)
    evaluate_unet(model_path, experiment_spec, output_dir, key=key)
    logger.debug("Experiment complete.")
def build_command_line_parser(parser=None):
    """
    Build (or extend) the argument parser for the evaluation script.

    Args:
        parser (argparse.ArgumentParser): Existing parser to extend; a new
            one is created when None.

    Returns:
        argparse.ArgumentParser: Parser with all evaluation flags registered.
    """
    if parser is None:
        parser = argparse.ArgumentParser(prog='Evaluate',
                                         description='Evaluate the segmentation model.')
    default_experiment_path = os.path.join(os.path.expanduser('~'), 'experiments')
    parser.add_argument(
        '-e',
        '--experiment_spec',
        type=str,
        default=None,
        help='Path to spec file. Absolute path or relative to working directory. \
            If not specified, default spec from spec_loader.py is used.'
    )
    parser.add_argument(
        '-o',
        '--results_dir',
        type=str,
        default=default_experiment_path,
        help='Path to a folder where experiment outputs metrics json should be \
            written.'
    )
    parser.add_argument(
        '-m',
        '--model_path',
        type=str,
        default=default_experiment_path,
        help='Path to a folder from where the model should be taken for evaluation.'
    )
    parser.add_argument(
        '-v',
        '--verbose',
        action='store_true',
        help='Set verbosity level for the logger.'
    )
    parser.add_argument(
        '-k',
        '--key',
        type=str,
        default="",
        required=False,
        help='The key to load model provided for evaluation.'
    )
    # Dummy arguments for Deploy: accepted but hidden from --help so the
    # same CLI works when driven by the deploy tooling.
    parser.add_argument(
        '-i',
        '--image_dir',
        type=str,
        required=False,
        default=None,
        help=argparse.SUPPRESS
    )
    parser.add_argument(
        '-l',
        '--label_dir',
        type=str,
        required=False,
        help=argparse.SUPPRESS
    )
    parser.add_argument(
        '-b',
        '--batch_size',
        type=int,
        required=False,
        default=1,
        help=argparse.SUPPRESS
    )
    return parser
def parse_command_line_args(cl_args=None):
    """Parse command line arguments for the evaluation entry point.

    Args:
        cl_args (list): Argument list, typically sys.argv[1:].

    Returns:
        argparse.Namespace: Parsed arguments.
    """
    return build_command_line_parser(parser=None).parse_args(cl_args)
def main(args=None):
    """Run the evaluation process."""
    parsed = parse_command_line_args(args)
    # The -v/--verbose flag upgrades logging from INFO to DEBUG.
    log_level = 'DEBUG' if parsed.verbose else 'INFO'
    logging.basicConfig(
        format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
        level=log_level)
    # Configure tf logger verbosity.
    tf.logging.set_verbosity(tf.logging.INFO)
    run_experiment(config_path=parsed.experiment_spec,
                   model_path=parsed.model_path,
                   output_dir=parsed.results_dir,
                   key=parsed.key)
if __name__ == "__main__":
    try:
        main()
        status_logging.get_status_logger().write(
            status_level=status_logging.Status.SUCCESS,
            message="Evaluation finished successfully."
        )
    except (KeyboardInterrupt, SystemExit):
        # Deliberate interruption is reported as FAILURE but not re-raised.
        status_logging.get_status_logger().write(
            message="Evaluation was interrupted",
            verbosity_level=status_logging.Verbosity.INFO,
            status_level=status_logging.Status.FAILURE
        )
    except Exception as e:
        status_logging.get_status_logger().write(
            message=str(e),
            status_level=status_logging.Status.FAILURE
        )
        # Bare `raise` re-raises with the original traceback; `raise e`
        # would rebuild the traceback from this frame.
        raise
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/scripts/evaluate.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""TLT command line wrapper to invoke CLI scripts."""
import sys
from nvidia_tao_tf1.cv.common.entrypoint.entrypoint import launch_job
import nvidia_tao_tf1.cv.unet.scripts
def main():
    """Launch a UNet job by delegating to the common TAO job launcher."""
    # launch_job dispatches sys.argv[1:] to the matching script under
    # nvidia_tao_tf1.cv.unet.scripts.
    launch_job(nvidia_tao_tf1.cv.unet.scripts, "unet", sys.argv[1:])


if __name__ == "__main__":
    main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/entrypoint/unet.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Unet Entrypoint."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientNet Unet model class that takes care of constructing and validating a model."""
import logging
import keras
from keras.models import Model
from nvidia_tao_tf1.core.templates.efficientnet import EfficientNetB0
from nvidia_tao_tf1.cv.unet.model.layers import Conv2DTranspose_block
from nvidia_tao_tf1.cv.unet.model.unet_model import UnetModel
logger = logging.getLogger(__name__)
eff_dict = {'efficientnet_b0': ('block1a_project_bn', 'block2a_project_bn',
'block3a_project_bn', 'block5a_project_bn')}
class EfficientUnet(UnetModel):
    """Unet with an EfficientNet-B0 encoder."""

    def __init__(self, *args, **kwargs):
        """Init function.

        Args:
            num_layers (int): Number of layers for scalable feature extractors.
            use_pooling (bool): Whether to add pooling layers to the feature extractor.
            use_batch_norm (bool): Whether to add batch norm layers.
            dropout_rate (float): Fraction of the input units to drop. 0.0 means dropout is
                not used.
            target_class_names (list): A list of target class names.
            freeze_pretrained_layers (bool): Prevent updates to pretrained layers' parameters.
            allow_loaded_model_modification (bool): Allow loaded model modification.
            template (str): Model template to use for feature extractor.
            freeze_bn (bool): The boolean to freeze BN or not.
            load_graph (bool): The boolean to load graph for phase 1.
        """
        super(EfficientUnet, self).__init__(*args, **kwargs)

    def construct_decoder_model(self, encoder_model, export=False):
        """Construct the decoder for Unet with EfficientNet as backbone.

        Args:
            encoder_model (keras.model): keras model type.
            export (bool): Set the inference flag to build the
                inference model with softmax.

        Returns:
            model (keras.model): The entire Unet model with encoder and decoder.
        """
        # eff_dict maps the template name to the encoder layer names whose
        # outputs serve as skip connections.
        B1, B2, B3, B4 = eff_dict[self.template]
        S2 = encoder_model.get_layer(B1).output
        S3 = encoder_model.get_layer(B2).output
        S4 = encoder_model.get_layer(B3).output
        S5 = encoder_model.get_layer(B4).output
        skips = [S2, S3, S4, S5]
        out = encoder_model.output
        # Five upsampling stages; the deepest skip (S5) is consumed first and
        # the last stage runs without a skip once the list is exhausted.
        for filter_tmp in [512, 256, 128, 64, 32]:
            if skips:
                skip_to_use = skips.pop()
            else:
                skip_to_use = None
            out = Conv2DTranspose_block(input_tensor=out, filters=filter_tmp,
                                        initializer="glorot_uniform",
                                        skip=skip_to_use,
                                        use_batchnorm=self.use_batch_norm,
                                        freeze_bn=self.freeze_bn)
        # 1x1 conv producing one output channel per target class (channels_first).
        out = keras.layers.Conv2D(self.num_target_classes, (1, 1), padding='same',
                                  data_format="channels_first")(out)
        if export:
            logger.debug("Building model for export")
            # Export graph appends the final activation on top of the logits.
            out = self.get_activation_for_export(out)
        model_unet = Model(inputs=encoder_model.input, outputs=out)
        return model_unet

    def get_base_model(self, args, kwargs):
        """Construct the EfficientNet-B0 backbone (no classification head).

        Args:
            args (list): Positional args; args[1] is used as the input tensor.
                NOTE: the list is emptied in place before the backbone call.
            kwargs (dict): Keyword args forwarded to EfficientNetB0; mutated in
                place to set add_head/input_tensor/stride16.

        Returns:
            keras.Model: The EfficientNet-B0 feature extractor.
        """
        model_class = EfficientNetB0
        kwargs['add_head'] = False
        kwargs['input_tensor'] = args[1]
        kwargs['stride16'] = True
        # Drain the positional args so the backbone is configured purely via
        # kwargs (note this mutates the caller's list as well).
        while args:
            args.pop()
        model = model_class(*args, **kwargs)
        return model
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/model/efficientnet_unet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Skip Net decoder Unet model class that takes care of constructing and validating a model."""
import logging
import keras
from keras.models import Model
from nvidia_tao_tf1.core.templates.shufflenet import ShuffleNet
from nvidia_tao_tf1.cv.unet.model.layers import conv2D_bn, convTranspose2D_bn
from nvidia_tao_tf1.cv.unet.model.unet_model import UnetModel
logger = logging.getLogger(__name__)
eff_dict = {'shufflenet': ('stage4/block4/relu_out', 'stage3/block8/relu_out',
'stage2/block4/relu_out', 'maxpool1', 'conv1', 'score_fr')}
class ShuffleNetUnet(UnetModel):
    """ShuffleNet Unet class (FCN-style score fusion over a ShuffleNet encoder)."""

    def __init__(self, *args, **kwargs):
        """Init function.

        Args:
            num_layers (int): Number of layers for scalable feature extractors.
            use_pooling (bool): Whether to add pooling layers to the feature extractor.
            use_batch_norm (bool): Whether to add batch norm layers.
            dropout_rate (float): Fraction of the input units to drop. 0.0 means dropout is
                not used.
            target_class_names (list): A list of target class names.
            freeze_pretrained_layers (bool): Prevent updates to pretrained layers' parameters.
            allow_loaded_model_modification (bool): Allow loaded model modification.
            template (str): Model template to use for feature extractor.
            freeze_bn (bool): The boolean to freeze BN or not.
            load_graph (bool): The boolean to load graph for phase 1.
        """
        super(ShuffleNetUnet, self).__init__(*args, **kwargs)

    def construct_decoder_model(self, encoder_model, export=False):
        """Construct the decoder for Unet with ShuffleNet as backbone.

        Args:
            encoder_model (keras.model): keras model type.
            export (bool): Set the inference flag to build the
                inference model with softmax.

        Returns:
            model (keras.model): The entire Unet model with encoder and decoder.
        """
        # Only the stage3/stage2 outputs and the final score layer are used
        # for the FCN-style fusion; the other mapped names are ignored.
        _, stage3, stage2, _, _, score_fr = eff_dict[self.template]
        stage3_out = encoder_model.get_layer(stage3).output
        stage2_out = encoder_model.get_layer(stage2).output
        score_fr_out = encoder_model.get_layer(score_fr).output
        # Upsample the coarse scores and fuse with stage3 class scores.
        upscore = convTranspose2D_bn(x=score_fr_out, filters=self.num_target_classes,
                                     freeze_bn=False, use_batchnorm=True)
        score_feed1 = conv2D_bn(x=stage3_out, filters=self.num_target_classes,
                                freeze_bn=False, kernel_size=(1, 1), use_batchnorm=True)
        fuse_feed1 = keras.layers.add([upscore, score_feed1])
        # Second fusion with stage2 class scores.
        upscore4 = convTranspose2D_bn(x=fuse_feed1, filters=self.num_target_classes,
                                      freeze_bn=False, use_batchnorm=True)
        score_feed2 = conv2D_bn(x=stage2_out, filters=self.num_target_classes,
                                freeze_bn=False, kernel_size=(1, 1), use_batchnorm=True)
        fuse_feed2 = keras.layers.add([upscore4, score_feed2])
        # Final 8x upsampling back to input resolution (logits, channels_first).
        out = keras.layers.Conv2DTranspose(
            filters=self.num_target_classes,
            kernel_size=(16, 16),
            strides=(8, 8),
            padding='same',
            activation=None,
            data_format="channels_first")(fuse_feed2)
        if export:
            logger.debug("Building model for export")
            # Softmax over the class axis for the export graph.
            out = keras.layers.Permute((2, 3, 1))(out)
            out = keras.layers.Softmax(axis=-1)(out)
        model_unet = Model(inputs=encoder_model.input, outputs=out)
        # NOTE(review): model.summary() prints the summary itself and returns
        # None, so this print() emits an extra "None" line.
        print(model_unet.summary())
        return model_unet

    def get_base_model(self, args, kwargs):
        """Construct the ShuffleNet backbone.

        Args:
            args (list): Unused positional args (kept for interface parity).
            kwargs (dict): Only kwargs['input_shape'] is read.

        Returns:
            keras.Model: ShuffleNet model with num_target_classes classes.
        """
        model_class = ShuffleNet
        model = model_class(input_shape=kwargs['input_shape'], classes=self.num_target_classes)
        return model
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/model/shufflenet_unet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Vanilla Unet model class that takes care of constructing and validating a model."""
import logging
import keras
from keras.layers import Input
from keras.models import Model
import tensorflow as tf
from nvidia_tao_tf1.cv.unet.model.layers import bottleneck, downsample_block, input_block, \
output_block, upsample_block
from nvidia_tao_tf1.cv.unet.model.unet_model import UnetModel
logger = logging.getLogger(__name__)
class VanillaUnet(UnetModel):
    """VanillaUnet class: the original encoder/decoder Unet with unpadded convolutions."""

    def __init__(self, *args, **kwargs):
        """Init function.

        Args:
            num_layers (int): Number of layers for scalable feature extractors.
            use_pooling (bool): Whether to add pooling layers to the feature extractor.
            use_batch_norm (bool): Whether to add batch norm layers.
            dropout_rate (float): Fraction of the input units to drop. 0.0 means dropout is
                not used.
            target_class_names (list): A list of target class names.
            freeze_pretrained_layers (bool): Prevent updates to pretrained layers' parameters.
            allow_loaded_model_modification (bool): Allow loaded model modification.
            template (str): Model template to use for feature extractor.
            freeze_bn (bool): The boolean to freeze BN or not.
            load_graph (bool): The boolean to load graph for phase 1.
        """
        super(VanillaUnet, self).__init__(*args, **kwargs)

    def construct_encoder_decoder_model(self, input_shape, mode, features, export):
        """Generate the full vanilla Unet (encoder and decoder).

        Args:
            input_shape (tuple): Model input shape (C, H, W); H and W must
                both be 572 for this architecture.
            mode: Estimator mode, forwarded to the bottleneck block.
            features: Input tensor fed by the estimator during training; a
                fresh placeholder Input is created when falsy.
            export (bool): If True, append the export activation (softmax)
                on top of the logits.

        Raises:
            AssertionError: If the model is already constructed, or if the
                input size is not 572x572.

        Returns:
            model (keras.model): The Unet model.
        """
        assert not self.constructed, "Model already constructed."
        skip_connections = []
        assert (input_shape[1] == input_shape[2] == 572), \
            "model_input_width and model_input_height should be set 572 \
            for Vanilla Unet in the spec."
        if features:
            inputs = Input(tensor=features, shape=input_shape)
        else:
            inputs = Input(shape=input_shape)

        out, skip = input_block(inputs, filters=64)

        skip_connections.append(skip)
        # Encoder: three downsampling stages, each stashing its skip tensor.
        for filters in [128, 256, 512]:
            out, skip = downsample_block(out, filters=filters)
            skip_connections.append(skip)
        # Bottom of the "U": two unpadded 3x3 convolutions at 1024 filters.
        out = keras.layers.Conv2D(
            filters=1024,
            kernel_size=(3, 3),
            activation=tf.nn.relu)(out)
        out = keras.layers.Conv2D(
            filters=1024,
            kernel_size=(3, 3),
            activation=tf.nn.relu)(out)
        out = bottleneck(out, filters_up=512, mode=mode)

        # Constructing the decoder
        # NOTE(review): each tuple is forwarded to upsample_block as
        # `filters`; the per-element meaning is defined there — confirm.
        for filters in [(512, 4, 256), (256, 16, 128), (128, 40, 64)]:
            out = upsample_block(out,
                                 residual_input=skip_connections.pop(),
                                 filters=filters)

        out = output_block(out, residual_input=skip_connections.pop(),
                           filters=64, n_classes=self.num_target_classes)
        if export:
            # When export the output is logits, applying softmax on it for final
            # output
            logger.debug("Building model for export")
            out = self.get_activation_for_export(out)
        model_unet_v1 = Model(inputs=inputs, outputs=out)
        return model_unet_v1
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/model/vanilla_unet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build the Unet model from the spec."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_tf1.cv.unet.model.efficientnet_unet import EfficientUnet
from nvidia_tao_tf1.cv.unet.model.resnet_unet import ResnetUnet
from nvidia_tao_tf1.cv.unet.model.shufflenet_unet import ShuffleNetUnet
from nvidia_tao_tf1.cv.unet.model.unet_model import UnetModel
from nvidia_tao_tf1.cv.unet.model.vanilla_unet import VanillaUnet
from nvidia_tao_tf1.cv.unet.model.vanilla_unet_dynamic import VanillaUnetDynamic
from nvidia_tao_tf1.cv.unet.model.vgg_unet import VggUnet
from nvidia_tao_tf1.cv.unet.proto.model_config_pb2 import ModelConfig
def select_model_proto(experiment_spec):
    """Pick the model configuration message out of an experiment spec.

    Args:
        experiment_spec: nvidia_tao_tf1.cv.detectnet_v2.proto.experiment proto message.

    Returns:
        The ``model_config`` sub-message of the given spec.
    """
    model_proto = experiment_spec.model_config
    return model_proto
def get_base_model_config(experiment_spec):
    """Fetch and validate the model config from an experiment spec.

    Args:
        experiment_spec: nvidia_tao_tf1.cv.unet.proto.experiment proto message.

    Returns:
        model_config (ModelConfig): Model configuration proto.

    Raises:
        ValueError: If the model config proto of the given experiment spec is
            of unknown type.
    """
    config = select_model_proto(experiment_spec)
    if not isinstance(config, ModelConfig):
        raise ValueError("Model config is of unknown type.")
    return config
def build_model(m_config, target_class_names, seed=None):
    """Build a Unet model.

    The returned object is a Unet instance whose feature extractor (backbone)
    is selected by ``m_config.arch``.

    Arguments:
        m_config (ModelConfig): Model configuration proto.
        target_class_names (list): A list of target class names that has details
            of label_id/ train_id/ class name.
        seed (int): Optional seed forwarded to the model for deterministic
            weight initialization.

    Returns:
        A Unet model. By default, a Unet instance with resnet feature extractor
        is returned.

    Raises:
        AssertionError: If ``m_config`` is not a ModelConfig proto or the
            requested architecture is not supported.
        ValueError: If ``m_config.dropout_rate`` is outside [0, 1].
    """
    # model_config.num_layers is checked during unet.construct_model. Only
    # certain values are supported.
    MODEL_CLASS = {
        'resnet': ResnetUnet,
        'vgg': VggUnet,
        'vanilla_unet_dynamic': VanillaUnetDynamic,
        'vanilla_unet': VanillaUnet,
        'efficientnet_b0': EfficientUnet,
        'shufflenet': ShuffleNetUnet
    }
    assert isinstance(m_config, ModelConfig), \
        "Unsupported model_proto message."
    # To check if backbone is supported.
    assert m_config.arch in ["resnet", "vgg", "vanilla_unet", "efficientnet_b0",
                             "vanilla_unet_dynamic", "shufflenet"], \
        "The given arch: " + m_config.arch + " is not supported"
    # Check sanity of the parameters.
    if m_config.dropout_rate < 0.0 or m_config.dropout_rate > 1.0:
        raise ValueError("ModelConfig.dropout_rate must be >= 0 and <= 1")
    # Common model building arguments for all model types. The arch assertion
    # above guarantees a non-empty, supported template, so it is assigned
    # unconditionally (the previous "if not m_config.arch" fallback was dead
    # code that would have raised a KeyError anyway).
    args = {'num_layers': m_config.num_layers if m_config.num_layers else 18,
            'use_pooling': m_config.use_pooling,
            'use_batch_norm': m_config.use_batch_norm,
            'dropout_rate': m_config.dropout_rate if m_config.dropout_rate else 0.0,
            'activation_config': m_config.activation,
            'target_class_names': target_class_names if target_class_names else None,
            'freeze_pretrained_layers': m_config.freeze_pretrained_layers,
            'freeze_blocks': m_config.freeze_blocks if m_config.freeze_blocks else None,
            'freeze_bn': m_config.freeze_bn,
            'allow_loaded_model_modification': m_config.allow_loaded_model_modification,
            'all_projections': m_config.all_projections,
            'load_graph': m_config.load_graph,
            'initializer': m_config.initializer,
            'seed': seed,
            'enable_qat': m_config.enable_qat,
            'template': m_config.arch
            }
    model_class = MODEL_CLASS[args['template']]
    return model_class(**args)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/model/build_unet_model.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""IVA model definition codes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/model/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VGG Unet model class that takes care of constructing and validating a model."""
import logging
import keras
from keras import backend as K
from keras.models import Model
from nvidia_tao_tf1.core.templates.vgg import VggNet
from nvidia_tao_tf1.cv.unet.model.unet_model import UnetModel
logger = logging.getLogger(__name__)
# Names of the VGG encoder layers whose outputs are tapped by the decoder for
# skip connections, ordered from shallowest to deepest, keyed by template.
vgg_dict = {'vgg': ('block_1a_relu', 'block1_pool',
                    'block_2a_relu', 'block2_pool',
                    'block_3a_relu', 'block_3b_relu', 'block3_pool',
                    'block_4a_relu', 'block_4b_relu', 'block4_pool',
                    'block_5a_relu', 'block_5b_relu', 'block_5c_relu')}
class VggUnet(UnetModel):
    """Unet variant with a VGG feature extractor as the encoder."""

    def __init__(self, *args, **kwargs):
        """Init function.

        Args:
            num_layers (int): Number of layers for scalable feature extractors.
            use_pooling (bool): Whether to add pooling layers to the feature extractor.
            use_batch_norm (bool): Whether to add batch norm layers.
            dropout_rate (float): Fraction of the input units to drop. 0.0 means dropout is
                not used.
            target_class_names (list): A list of target class names.
            freeze_pretrained_layers (bool): Prevent updates to pretrained layers' parameters.
            allow_loaded_model_modification (bool): Allow loaded model modification.
            template (str): Model template to use for feature extractor.
            freeze_bn (bool): The boolean to freeze BN or not.
            load_graph (bool): The boolean to load graph for phase 1.
        """
        super(VggUnet, self).__init__(*args, **kwargs)

    def construct_decoder_model(self, encoder_model, export=False):
        """Construct the decoder for Unet with VGG as backbone.

        Args:
            encoder_model (keras.model): keras model type.
            export (bool): Set the inference flag to build the
                inference model with softmax.

        Returns:
            model (keras.model): The entire Unet model with encoder and decoder.
        """
        # Tap encoder activations by name; the indices select the relu/pool
        # outputs listed in vgg_dict that serve as skip connections.
        vgg_layers = vgg_dict[self.template]
        conv1_1 = encoder_model.get_layer(vgg_layers[0]).output
        conv1_2 = encoder_model.get_layer(vgg_layers[1]).output
        conv2_2 = encoder_model.get_layer(vgg_layers[3]).output
        conv3_3 = encoder_model.get_layer(vgg_layers[6]).output
        conv4_3 = encoder_model.get_layer(vgg_layers[9]).output
        conv5_3 = encoder_model.get_layer(vgg_layers[12]).output
        # Extra pooling so the deepest feature map is halved before the first
        # transposed convolution upsamples it back.
        conv5_3 = keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same',
                                            data_format="channels_first")(conv5_3)
        # First Upsampling
        upscale1 = keras.layers.Conv2DTranspose(
            filters=K.int_shape(conv4_3)[1], kernel_size=(4, 4), strides=(2, 2),
            padding='same', data_format="channels_first")(conv5_3)
        # NOTE(review): the pooling/transpose layers hard-code channels_first
        # while the Conv2D layers rely on the global image data format; the
        # concat axis below assumes both agree -- confirm the backend is
        # configured for channels_first.
        concat_axis = 3 if K.image_data_format() == 'channels_last' else 1
        concat1 = keras.layers.Concatenate(axis=concat_axis)([upscale1, conv4_3])
        expand11 = keras.layers.Conv2D(
            filters=K.int_shape(conv4_3)[1],
            kernel_size=(3, 3), padding='same')(concat1)
        expand12 = keras.layers.Conv2D(
            filters=K.int_shape(conv4_3)[1], kernel_size=(3, 3),
            padding='same')(expand11)
        # Second Block Upsampling
        upscale2 = keras.layers.Conv2DTranspose(filters=K.int_shape(conv3_3)[1],
                                                kernel_size=(4, 4),
                                                strides=(2, 2),
                                                padding='same',
                                                data_format="channels_first")(expand12)
        concat2 = keras.layers.Concatenate(axis=concat_axis)([upscale2, conv3_3])
        expand21 = keras.layers.Conv2D(
            filters=K.int_shape(conv3_3)[1],
            kernel_size=(3, 3), padding='same')(concat2)
        expand22 = keras.layers.Conv2D(
            filters=K.int_shape(conv3_3)[1], kernel_size=(3, 3), padding='same')(expand21)
        # Third Block Upsampling
        upscale3 = keras.layers.Conv2DTranspose(
            filters=K.int_shape(conv2_2)[1], kernel_size=(4, 4), strides=(2, 2),
            padding='same', data_format="channels_first")(expand22)
        concat3 = keras.layers.Concatenate(axis=concat_axis)([upscale3, conv2_2])
        expand31 = keras.layers.Conv2D(
            filters=K.int_shape(conv2_2)[1],
            kernel_size=(3, 3), padding='same')(concat3)
        expand32 = keras.layers.Conv2D(
            filters=K.int_shape(conv2_2)[1], kernel_size=(3, 3), padding='same')(expand31)
        # Fourth Block Upsampling
        upscale4 = keras.layers.Conv2DTranspose(
            filters=K.int_shape(conv1_2)[1], kernel_size=(4, 4), strides=(2, 2),
            padding='same', data_format="channels_first")(expand32)
        concat4 = keras.layers.Concatenate(axis=concat_axis)([upscale4, conv1_2])
        expand41 = keras.layers.Conv2D(
            filters=K.int_shape(conv1_2)[1],
            kernel_size=(3, 3), padding='same')(concat4)
        expand42 = keras.layers.Conv2D(
            filters=K.int_shape(conv1_2)[1], kernel_size=(3, 3), padding='same')(expand41)
        # Fifth Block Upsampling: restores the input resolution.
        upscale5 = keras.layers.Conv2DTranspose(
            filters=K.int_shape(conv1_1)[1], kernel_size=(4, 4), strides=(2, 2),
            padding='same', data_format="channels_first")(expand42)
        concat5 = keras.layers.Concatenate(axis=concat_axis)([upscale5, conv1_1])
        expand51 = keras.layers.Conv2D(
            filters=K.int_shape(conv1_1)[1], kernel_size=(3, 3), padding='same')(concat5)
        expand52 = keras.layers.Conv2D(
            filters=K.int_shape(conv1_1)[1], kernel_size=(3, 3), padding='same')(expand51)
        # Output block: 1x1 conv producing per-pixel class logits.
        out = keras.layers.Conv2D(filters=self.num_target_classes,
                                  kernel_size=(1, 1), padding='same')(expand52)
        if export:
            logger.debug("Building model for export")
            out = self.get_activation_for_export(out)
        model_unet = Model(inputs=encoder_model.input, outputs=out)
        return model_unet

    def get_base_model(self, args, kwargs):
        """Function to construct model specific backbone.

        Args:
            args (list): Positional arguments for the backbone constructor.
            kwargs (dict): Keyword arguments for the backbone constructor.

        Returns:
            The VggNet feature-extractor model.
        """
        model_class = VggNet
        model = model_class(*args, **kwargs)
        return model
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/model/vgg_unet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to load model for export and other model i/o utilities."""
import logging
import os
import tempfile
from zipfile import BadZipFile, ZipFile
import keras
import tensorflow as tf
from nvidia_tao_tf1.core.models.templates.qdq_layer import QDQ
from nvidia_tao_tf1.core.models.templates.quantized_conv2d import QuantizedConv2D
from nvidia_tao_tf1.core.models.templates.quantized_conv2dtranspose import QuantizedConv2DTranspose
from nvidia_tao_tf1.core.models.templates.quantized_dense import QuantizedDense
from nvidia_tao_tf1.core.models.templates.quantized_depthwiseconv2d import QuantizedDepthwiseConv2D
from nvidia_tao_tf1.cv.unet.model.build_unet_model import build_model
from nvidia_tao_tf1.cv.unet.model.build_unet_model import select_model_proto
from nvidia_tao_tf1.cv.unet.model.utilities import build_regularizer
from nvidia_tao_tf1.cv.unet.model.utilities import build_target_class_list, save_tmp_json
from nvidia_tao_tf1.cv.unet.model.utilities import initialize, initialize_params
from nvidia_tao_tf1.encoding import encoding
logger = logging.getLogger(__name__)
# Layer types whose presence indicates a model was built for
# quantization-aware training (QAT); see check_for_quantized_layers.
QAT_LAYERS = [
    QuantizedConv2D,
    QuantizedDepthwiseConv2D,
    QDQ,
    QuantizedDense,
    QuantizedConv2DTranspose,
]
def _extract_ckpt(encoded_checkpoint, key):
    """Get an unencrypted checkpoint from a tlt file.

    Args:
        encoded_checkpoint (str): Either a directory that already holds the
            extracted checkpoint files, or the path of an encrypted zip
            archive containing them.
        key (str): Encryption key used to decrypt the archive. Not used when
            a directory is given.

    Returns:
        tuple: ``(checkpoint_prefix, model_json)`` where checkpoint_prefix is
        the ``model.ckpt``/``model.ckpt-<step>`` path prefix to restore from
        and model_json is the pruned-model graph json path (None if absent).

    Raises:
        ValueError: If the archive cannot be unzipped (bad key).
        IOError: If the checkpoint archive is corrupted.
        AssertionError: If no ``.meta`` file is found after extraction.
    """
    temp_zip_path = None
    if os.path.isdir(encoded_checkpoint):
        # Already extracted: use the directory as-is.
        temp_dir = encoded_checkpoint
    else:
        temp_dir = tempfile.mkdtemp()
        logger.info("Loading weights from {}".format(encoded_checkpoint))
        os_handle, temp_zip_path = tempfile.mkstemp()
        os.close(os_handle)
        # Decrypt the checkpoint file into a temporary zip archive. The
        # context managers close both files; the original also had no-op
        # ".closed" attribute reads here, which have been removed.
        with open(encoded_checkpoint, 'rb') as encoded_file, open(temp_zip_path, 'wb') as tmp_zipf:
            encoding.decode(encoded_file, tmp_zipf, key.encode())
        # Load zip file and extract members to a tmp_directory.
        try:
            with ZipFile(temp_zip_path, 'r') as zip_object:
                for member in zip_object.namelist():
                    zip_object.extract(member, path=temp_dir)
        except BadZipFile:
            raise ValueError("Please double check your encryption key.")
        except Exception:
            raise IOError("The last checkpoint file is not saved properly. \
            Please delete it and rerun the script.")
    model_json = None
    json_files = [os.path.join(temp_dir, f) for f in os.listdir(temp_dir) if f.endswith(".json")]
    if len(json_files) > 0:
        # A json alongside the checkpoint means this is a pruned model graph.
        model_json = json_files[0]
    meta_files = [f for f in os.listdir(temp_dir) if f.endswith(".meta")]
    # This function is used during inference/eval, i.e. the user passed a
    # trained (possibly pruned) model.
    assert(len(meta_files) > 0), "In case of pruned model, please set the load_graph to true."
    # A step-suffixed meta file ("model.ckpt-<step>.meta") carries the global
    # step to restore from; otherwise fall back to the plain prefix.
    if "-" in meta_files[0]:
        step = int(meta_files[0].split('model.ckpt-')[-1].split('.')[0])
        ckpt_prefix = os.path.join(temp_dir, "model.ckpt-{}".format(step))
    else:
        ckpt_prefix = os.path.join(temp_dir, "model.ckpt")
    # Remove the temporary decrypted archive, if one was created.
    if temp_zip_path:
        os.remove(temp_zip_path)
    return ckpt_prefix, model_json
def method_saver(latest):
    """Restore checkpoint weights into the current Keras TF session.

    Args:
        latest (str): Path prefix of the TensorFlow checkpoint to restore.
    """
    sess = keras.backend.get_session()
    # The original called tf.global_variables_initializer() here without
    # running the returned op, which is a no-op; Saver.restore below assigns
    # every variable it restores, so the dead call has been removed.
    new_saver = tf.compat.v1.train.Saver()
    new_saver.restore(sess, latest)
    logger.info("Loaded weights Successfully for Export")
def load_keras_model(experiment_spec, model_path, export=False, key=None):
    """A function to load keras model.

    Builds the Unet graph described by the spec (honoring pruned-model json
    and QAT settings) and restores the checkpoint weights into it.

    Args:
        experiment_spec: nvidia_tao_tf1.cv.unet.proto.experiment proto message.
        model_path (str): Encrypted checkpoint (.tlt) path or an
            already-extracted checkpoint directory.
        export (bool): If True, build the graph with the final activation
            attached for export.
        key (str): Key to decrypt the model checkpoint.

    Returns:
        tuple: (keras_model, custom_objs). custom_objs is the dict of custom
        layers needed to deserialize the model (currently None).
    """
    initialize(experiment_spec)
    # Initialize Params
    params = initialize_params(experiment_spec)
    target_classes = build_target_class_list(
        experiment_spec.dataset_config.data_class_config)
    model_config = select_model_proto(experiment_spec)
    custom_objs = None
    unet_model = build_model(m_config=model_config,
                             target_class_names=target_classes)
    # Decrypt/extract the checkpoint; model_json is non-None for pruned models.
    checkpoint_path, model_json = _extract_ckpt(model_path, key)
    if model_json:
        # If there is a json in tlt, it is a pruned model.
        # NOTE(review): params is written as a mapping here but read via
        # attribute access below -- presumably an addict.Dict; confirm.
        params["model_json"] = model_json
    kernel_regularizer, bias_regularizer = build_regularizer(
        experiment_spec.training_config.regularizer)
    # Constructing the unet model
    img_height, img_width, img_channels = experiment_spec.model_config.model_input_height, \
        experiment_spec.model_config.model_input_width, \
        experiment_spec.model_config.model_input_channels
    if params.enable_qat:
        # Construct the QAT graph and save it as a json file that the final
        # construction below will load.
        # NOTE(review): this pass uses params.key as the encryption key while
        # the final construction uses this function's `key` argument --
        # confirm the two agree.
        model_qat_json = unet_model.construct_model(
            input_shape=(img_channels, img_height, img_width),
            pretrained_weights_file=params.pretrained_weights_file,
            enc_key=params.key, model_json=params.model_json,
            features=None, construct_qat=True, custom_objs=custom_objs)
        model_qat_json = save_tmp_json(model_qat_json)
        params.model_json = model_qat_json
    unet_model.construct_model(input_shape=(img_channels, img_height, img_width),
                               kernel_regularizer=kernel_regularizer,
                               bias_regularizer=bias_regularizer,
                               pretrained_weights_file=params.pretrained_weights_file,
                               enc_key=key, export=export, model_json=params.model_json,
                               custom_objs=custom_objs)
    keras_model = unet_model.keras_model
    keras_model.summary()
    # Restore the checkpoint weights into the session backing the Keras graph.
    method_saver(checkpoint_path)
    return keras_model, custom_objs
def check_for_quantized_layers(model):
    """Return True if the Keras model contains any quantization layer.

    Args:
        model (keras.Model): Model whose layers are inspected against
            ``QAT_LAYERS``.
    """
    return any(type(layer) in QAT_LAYERS for layer in model.layers)
def save_keras_model(keras_model, model_path, key, save_format=None):
    """Utility function to save pruned keras model as .tlt.

    Writes the architecture (model.json), the weights (model.h5) and a TF
    checkpoint into ``model_path``, then clears the Keras session.

    Args:
        keras_model (keras.Model): In-memory model to serialize.
        model_path (str): Output directory; created if it does not exist.
        key (str): Encryption key. NOTE(review): not used here -- the
            artifacts are written unencrypted; confirm callers expect that.
        save_format (str): NOTE(review): accepted but unused by this
            implementation.
    """
    if not os.path.isdir(model_path):
        os.makedirs(model_path)
    # Saving session to the zip file.
    model_json = keras_model.to_json()
    with open(os.path.join(model_path, "model.json"), "w") as json_file:
        json_file.write(model_json)
    # serialize weights to HDF5
    keras_model.save_weights(os.path.join(model_path, "model.h5"))
    saver = tf.train.Checkpoint()
    # NOTE(review): the get_session() return value is discarded; presumably
    # called for its side effect of materializing the default session --
    # confirm before removing.
    keras.backend.get_session()
    saver.save(os.path.join(model_path, "model.ckpt"))
    keras.backend.clear_session()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/model/model_io.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Vanilla Unet Dynamic model class that takes care of constructing decoder model."""
import logging
import keras
from keras.layers import Input
from keras.models import Model
from nvidia_tao_tf1.cv.unet.model.layers import conv_block, decoder_block, encoder_block
from nvidia_tao_tf1.cv.unet.model.unet_model import UnetModel
logger = logging.getLogger(__name__)
class VanillaUnetDynamic(UnetModel):
    """Vanilla Unet with a dynamically built encoder/decoder graph."""

    def __init__(self, *args, **kwargs):
        """Init function.

        Args:
            num_layers (int): Number of layers for scalable feature extractors.
            use_pooling (bool): Whether to add pooling layers to the feature extractor.
            use_batch_norm (bool): Whether to add batch norm layers.
            dropout_rate (float): Fraction of the input units to drop. 0.0 means dropout is
                not used.
            target_class_names (list): A list of target class names.
            freeze_pretrained_layers (bool): Prevent updates to pretrained layers' parameters.
            allow_loaded_model_modification (bool): Allow loaded model modification.
            template (str): Model template to use for feature extractor.
            freeze_bn (bool): The boolean to freeze BN or not.
            load_graph (bool): The boolean to load graph for phase 1.
        """
        super(VanillaUnetDynamic, self).__init__(*args, **kwargs)

    def construct_encoder_decoder_model(self, input_shape, mode, features, export):
        """Build the encoder/decoder graph for the dynamic vanilla Unet.

        Args:
            input_shape: model input shape (N,C,H,W). N is ignored.
            mode: estimator mode (unused by this architecture).
            features: tensor fed by the estimator during training, if any.
            export (bool): attach the final activation for export.

        Raises:
            AssertionError: If the model is already constructed.

        Returns:
            model (keras.model): The encoder/decoder model.
        """
        assert not self.constructed, "Model already constructed."
        if features:
            inputs = Input(tensor=features, shape=input_shape)
        else:
            inputs = Input(shape=input_shape)
        # Contracting path: five encoder stages, each halving the spatial
        # resolution while widening the channels; keep every stage's pre-pool
        # activation as a skip connection.
        net = inputs
        skips = []
        for stage, width in enumerate((32, 64, 128, 256, 512)):
            net, skip = encoder_block(net, width, stage, self.use_batch_norm,
                                      self.freeze_bn, self.initializer)
            skips.append(skip)
        # Bottleneck block at the lowest resolution.
        net = conv_block(net, 1024, self.use_batch_norm, self.freeze_bn)
        # Expanding path: mirror the encoder, consuming the stored skip
        # connections deepest-first.
        for stage, width in enumerate((512, 256, 128, 64, 32)):
            net = decoder_block(net, skips.pop(), width, stage,
                                self.use_batch_norm, self.freeze_bn,
                                self.initializer)
        # 1x1 convolution produces the per-pixel class logits.
        net = keras.layers.Conv2D(self.num_target_classes, (1, 1))(net)
        if export:
            logger.debug("Building model for export")
            net = self.get_activation_for_export(net)
        return Model(inputs=[inputs], outputs=[net])
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/model/vanilla_unet_dynamic.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base Unet model class that takes care of constructing feature extractor."""
import logging
import keras
from keras.layers import Input
from keras.models import Model
from keras.models import model_from_json
import tensorflow as tf
from nvidia_tao_tf1.core.models.quantize_keras_model import create_quantized_keras_model
from nvidia_tao_tf1.cv.unet.model.utilities import get_num_unique_train_ids
from nvidia_tao_tf1.cv.unet.proto.model_config_pb2 import ModelConfig
logger = logging.getLogger(__name__)
class UnetModel(object):
    """UNet class.

    UNet class contains functionality for constructing Unet with all backbones.
    """

    def __init__(self, num_layers, use_pooling, use_batch_norm, dropout_rate,
                 activation_config, target_class_names,
                 freeze_pretrained_layers, allow_loaded_model_modification,
                 template='resnet', all_projections=True, freeze_blocks=None,
                 freeze_bn=False, load_graph=True, initializer=None, seed=None,
                 enable_qat=False):
        """Init function.

        Args:
            num_layers (int): Number of layers for scalable feature extractors.
            use_pooling (bool): Whether to add pooling layers to the feature extractor.
            use_batch_norm (bool): Whether to add batch norm layers.
            dropout_rate (float): Fraction of the input units to drop. 0.0 means dropout is
                not used.
            activation_config (str): Activation of the final layer; "sigmoid"
                switches the model to a single output channel.
            target_class_names (list): A list of target class names.
            freeze_pretrained_layers (bool): Prevent updates to pretrained layers' parameters.
            allow_loaded_model_modification (bool): Allow loaded model modification.
            template (str): Model template to use for feature extractor.
            all_projections (bool): Whether to use projection shortcuts in all
                ResNet blocks.
            freeze_blocks (list): Backbone blocks whose weights stay frozen.
            freeze_bn (bool): The boolean to freeze BN or not.
            load_graph (bool): The boolean to load graph for phase 1.
            initializer (str): layer initializer
            enable_qat (bool): flag to enable QAT during training
            seed (int): Experiment seed for initialization
        """
        self.num_layers = num_layers
        self.use_pooling = use_pooling
        self.use_batch_norm = use_batch_norm
        self.dropout_rate = dropout_rate
        self.template = template
        self.keras_model = None
        self.keras_training_model = None
        self.target_class_names = target_class_names
        self.num_target_classes = get_num_unique_train_ids(self.target_class_names)
        # A sigmoid head implies binary segmentation with one output channel.
        if activation_config == "sigmoid":
            self.num_target_classes = 1
        self.activation_config = activation_config
        self.freeze_pretrained_layers = freeze_pretrained_layers
        self.freeze_blocks = freeze_blocks
        self.freeze_bn = freeze_bn
        self.allow_loaded_model_modification = allow_loaded_model_modification
        self.constructed = False
        self.all_projections = all_projections
        self.inference_model = None
        self.load_graph = load_graph
        self.data_format = "channels_first"
        self.seed = seed
        self.initializer = self.get_initializer(initializer)
        self.enable_qat = enable_qat
        self.num_params = None

    def construct_model(self, input_shape, kernel_regularizer=None,
                        bias_regularizer=None, pretrained_weights_file=None,
                        enc_key=None, mode=None, features=None,
                        export=False, model_json=None, construct_qat=False,
                        custom_objs=None, qat_on_pruned=False):
        """Function to construct the encoder/ decoder.

        Args:
            input_shape (tuple / list / TensorShape):
                model input shape without batch dimension (C, H, W).
            kernel_regularizer (keras.regularizers.Regularizer instance):
                Regularizer to be applied to convolution kernels.
            bias_regularizer (keras.regularizers.Regularizer instance):
                Regularizer to be applied to biases.
            pretrained_weights_file (str): An optional model weights file
                (currently unused by this method).
            enc_key (str): The key to encrypt model (currently unused by this
                method).
            mode: Estimator mode, forwarded to vanilla-Unet construction.
            features: Optional input tensor fed by the estimator.
            export (bool): Attach the final activation for export.
            model_json (str): path of pruned model graph.
            construct_qat (bool): To indicate if qat graph is being constructed.
            custom_objs (dictionary): The custom layers present in the model.
            qat_on_pruned (bool): This is to indicate if we are applying qat on pruned
                model. That way the orig model will be loaded from pruned json and
                not constructed.

        Returns:
            The QAT graph json string when ``construct_qat`` is True, else
            None (the built model is stored on ``self.keras_model``).
        """
        if self.load_graph or (self.enable_qat and not construct_qat):
            # Load the pruned graph
            with open(model_json, 'r') as json_file:
                loaded_model_json = json_file.read()
            model_unet_tmp = model_from_json(loaded_model_json,
                                             custom_objects=custom_objs)
            # Adding softmax explicitly
            if export:
                inputs = model_unet_tmp.input
                out = model_unet_tmp.output
                out = self.get_activation_for_export(out)
                model_unet = Model(inputs=[inputs], outputs=[out])
            else:
                model_unet = model_unet_tmp
            if qat_on_pruned:
                # QAT on top of pruned model for training
                model_unet = create_quantized_keras_model(model_unet)
        else:
            # Build the graph from scratch: vanilla Unets construct both
            # halves in one call, other templates build encoder then decoder.
            if "vanilla" in self.template:
                model_unet = self.construct_encoder_decoder_model(input_shape, mode,
                                                                  features, export)
            else:
                encoder_model = self.construct_feature_extractor(
                    input_shape=input_shape, data_format=self.data_format,
                    kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer,
                    features=features, use_pooling=True)
                model_unet = self.construct_decoder_model(encoder_model, export=export)
            if self.enable_qat:
                model_unet = create_quantized_keras_model(model_unet)
        if construct_qat:
            # Construction phase of QAT: only the serialized graph is needed,
            # so drop the session and return the json.
            model_json = model_unet.to_json()
            keras.backend.clear_session()
            return model_json
        self.keras_model = model_unet
        self.num_params = model_unet.count_params()
        self.constructed = True
        return None

    def get_initializer(self, initializer):
        """Function to assign the kernel initializer.

        Args:
            initializer: ModelConfig.KernelInitializer enum value.

        Returns:
            A seeded TF initializer for HE_UNIFORM/HE_NORMAL, otherwise the
            Keras default 'glorot_uniform' identifier.
        """
        if initializer == ModelConfig.KernelInitializer.HE_UNIFORM:
            return tf.compat.v1.initializers.he_uniform(seed=self.seed)
        if initializer == ModelConfig.KernelInitializer.HE_NORMAL:
            return tf.compat.v1.initializers.he_normal(seed=self.seed)
        return 'glorot_uniform'

    def construct_feature_extractor(self, input_shape, data_format,
                                    kernel_regularizer=None,
                                    bias_regularizer=None, features=None,
                                    use_pooling=False):
        """Generate a keras feature extractor model.

        Args:
            input_shape: model input shape (N,C,H,W). N is ignored.
            data_format: Order of the dimensions (C, H, W).
            kernel_regularizer: Keras regularizer to be applied to convolution
                kernels.
            bias_regularizer: Keras regularizer to be applied to biases.
            features: Fed by the estimator during training.
            use_pooling: Bool to use pooling.

        Raises:
            AssertionError: If the model is already constructed.

        Returns:
            model (keras.model): The model for feature extraction.
        """
        assert not self.constructed, "Model already constructed."
        # Define entry points to the model.
        assert len(input_shape) == 3
        self.input_num_channels = int(input_shape[0])
        self.input_height = int(input_shape[1])
        self.input_width = int(input_shape[2])
        if features:
            inputs = Input(tensor=features, shape=input_shape)
        else:
            inputs = Input(shape=(self.input_num_channels, self.input_height,
                                  self.input_width))
        # Set up positional arguments and key word arguments to instantiate
        # feature extractor templates.
        args = [self.num_layers, inputs]
        kwargs = {'use_batch_norm': self.use_batch_norm,
                  'kernel_regularizer': kernel_regularizer,
                  'bias_regularizer': bias_regularizer,
                  'freeze_blocks': self.freeze_blocks,
                  'freeze_bn': self.freeze_bn,
                  'use_pooling': use_pooling
                  }
        # ShuffleNet additionally needs the explicit input shape.
        if self.template == "shufflenet":
            kwargs['input_shape'] = input_shape
        model = self.get_base_model(args, kwargs)
        # Feature extractor output shape.
        if self.template != "shufflenet":
            # Feature extractor output shape.
            self.output_height = model.output_shape[2]
            self.output_width = model.output_shape[3]
        return model

    def get_activation_for_export(self, out):
        """Function to get the activation of last layer.

        Applies sigmoid directly for binary output; otherwise permutes to
        channels-last and applies a per-pixel softmax.

        Args:
            out (keras layer): Keras layer output
        """
        if self.activation_config == "sigmoid":
            out = keras.layers.Activation('sigmoid', name="sigmoid")(out)
        else:
            out = keras.layers.Permute((2, 3, 1))(out)
            out = keras.layers.Softmax(axis=-1)(out)
        return out
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/model/unet_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ResNet Unet model class that takes care of constructing and validating a model."""
import logging
from keras.models import Model
from nvidia_tao_tf1.core.templates.resnet import ResNet
from nvidia_tao_tf1.cv.unet.model.layers import output_block_other, upsample_block_other
from nvidia_tao_tf1.cv.unet.model.unet_model import UnetModel
logger = logging.getLogger(__name__)
# ResNet encoder layer names tapped for decoder skip connections, ordered
# shallowest to deepest. ResNet-10 exposes only "a" block activations; the
# deeper variants use the "b" block activations.
resnet_dict_gen = {'resnet': ('activation_1', 'block_1b_relu', 'block_2b_relu',
                              'block_3b_relu', 'block_4b_relu')}
resnet_10_dict = {'resnet': ('activation_1', 'block_1a_relu', 'block_2a_relu',
                             'block_3a_relu', 'block_4a_relu')}
class ResnetUnet(UnetModel):
    """Unet variant with a ResNet feature extractor as the encoder."""

    def __init__(self, *args, **kwargs):
        """Init function.

        Args:
            num_layers (int): Number of layers for scalable feature extractors.
            use_pooling (bool): Whether to add pooling layers to the feature extractor.
            use_batch_norm (bool): Whether to add batch norm layers.
            dropout_rate (float): Fraction of the input units to drop. 0.0 means dropout is
                not used.
            target_class_names (list): A list of target class names.
            freeze_pretrained_layers (bool): Prevent updates to pretrained layers' parameters.
            allow_loaded_model_modification (bool): Allow loaded model modification.
            template (str): Model template to use for feature extractor.
            freeze_bn (bool): The boolean to freeze BN or not.
            load_graph (bool): The boolean to load graph for phase 1.
        """
        super(ResnetUnet, self).__init__(*args, **kwargs)

    def construct_decoder_model(self, encoder_model, export=False):
        """Construct the decoder for Unet with ResNet as backbone.

        Args:
            encoder_model (keras.model): keras model type.
            export (bool): Set the inference flag to build the
                inference model with softmax.

        Returns:
            model (keras.model): The entire Unet model with encoder and decoder.
        """
        # ResNet-10 only has "a" blocks; deeper variants tap the "b" blocks.
        layer_names = (resnet_10_dict if self.num_layers == 10
                       else resnet_dict_gen)[self.template]
        # First three names are the shallow-to-deep skip-connection taps.
        shallow, mid, deep = (encoder_model.get_layer(name).output
                              for name in layer_names[:3])
        skips = [deep, mid, shallow]
        net = encoder_model.output
        # Four upsampling stages; the last stage has no skip connection left.
        for stage, width in enumerate((256, 128, 64, 64)):
            residual = skips[stage] if stage < len(skips) else None
            net = upsample_block_other(net, residual, width,
                                       use_batchnorm=self.use_batch_norm,
                                       freeze_bn=self.freeze_bn,
                                       initializer=self.initializer)
        logits = output_block_other(net, n_classes=self.num_target_classes)
        if export:
            logger.debug("Building model for export")
            logits = self.get_activation_for_export(logits)
        return Model(inputs=encoder_model.input, outputs=logits)

    def get_base_model(self, args, kwargs):
        """Function to construct model specific backbone.

        Args:
            args (list): Positional arguments for the ResNet constructor.
            kwargs (dict): Keyword arguments for the ResNet constructor.

        Returns:
            The ResNet feature-extractor model.
        """
        kwargs['use_pooling'] = False
        kwargs['all_projections'] = self.all_projections
        return ResNet(*args, **kwargs)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/model/resnet_unet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for training and inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import random
import tempfile
import warnings
from zipfile import BadZipFile, ZipFile
from addict import Dict
import keras
from keras import backend as K
from keras.models import model_from_json
import numpy as np
import tensorflow as tf
from nvidia_tao_tf1.core.templates.shufflenet import ChannelShuffle, GroupLayer
from nvidia_tao_tf1.cv.common.utils import restore_eff
from nvidia_tao_tf1.cv.unet.distribution import distribution
from nvidia_tao_tf1.cv.unet.proto.regularizer_config_pb2 import RegularizerConfig
from nvidia_tao_tf1.encoding import encoding
CUSTOM_OBJS = {}
logger = logging.getLogger(__name__)
class TargetClass(object):
    """Container describing one semantic-segmentation target class.

    Attributes:
        name (str): Name of the target class.
        label_id (int): Original label id of every pixel of the mask.
        train_id (int): Mapped train id of every pixel in the mask.
    """

    def __init__(self, name, label_id, train_id=None):
        """Constructor.

        Args:
            name (str): Name of the target class.
            label_id (int): Original label id of every pixel of the mask.
            train_id (int): The mapped train id of every pixel in the mask.
        """
        self.name = name
        self.label_id = label_id
        self.train_id = train_id
def initialize(experiment_spec):
    """Initialize the training environment.

    Sets the Keras backend precision/learning phase, seeds all RNGs (with a
    per-process seed under distributed training) and exports the TF/Horovod
    tuning environment variables. The original version assigned
    HOROVOD_GPU_ALLREDUCE and TF_CPP_MIN_LOG_LEVEL twice; the duplicates
    are removed here.

    Args:
        experiment_spec: Loaded Unet Experiment spec.
    """
    random_seed = experiment_spec.random_seed
    training_precision = experiment_spec.model_config.training_precision
    setup_keras_backend(training_precision, is_training=True)
    # Set Maglev random seed. Take care to give different seed to each process.
    seed = distribution.get_distributor().distributed_seed(random_seed)
    set_random_seed(seed)
    os.environ['CUDA_CACHE_DISABLE'] = '0'
    os.environ['HOROVOD_GPU_ALLREDUCE'] = 'NCCL'
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'
    os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '0'
    os.environ['CACHE'] = 'false'
    # NOTE(review): the three flags below are boolean-valued in TF; the
    # value 'data' is merely truthy and looks unintentional — confirm the
    # intended setting before changing it.
    os.environ['TF_ADJUST_HUE_FUSED'] = 'data'
    os.environ['TF_ADJUST_SATURATION_FUSED'] = 'data'
    os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = 'data'
    os.environ['TF_SYNC_ON_FINISH'] = '0'
    os.environ['TF_AUTOTUNE_THRESHOLD'] = '2'
def initialize_params(experiment_spec):
    """Initialization of the params object to the estimator runtime config.

    Most fields fall back to a documented default when the corresponding
    proto field is unset; note that the ``X if X else default`` pattern
    relies on proto3 scalar fields being falsy when unset, so a field
    explicitly set to 0/False is indistinguishable from "unset" here.

    Args:
        experiment_spec: Loaded Unet Experiment spec.

    Returns:
        addict.Dict: Runtime parameters. Fields left as None (model_dir,
        max_steps, num_classes, ...) are filled in later by
        update_train_params / update_model_params.
    """
    training_config = experiment_spec.training_config
    dataset_config = experiment_spec.dataset_config
    model_config = experiment_spec.model_config
    return Dict({
        'exec_mode': "train",
        'model_dir': None,
        'resize_padding': dataset_config.resize_padding if
        dataset_config.resize_padding else False,
        'resize_method': dataset_config.resize_method.upper() if
        dataset_config.resize_method else 'BILINEAR',
        'log_dir': None,
        'batch_size': training_config.batch_size if training_config.batch_size else 1,
        'learning_rate': training_config.learning_rate if
        training_config.learning_rate else 0.0001,
        'activation': model_config.activation if model_config.activation else "softmax",
        'crossvalidation_idx': training_config.crossvalidation_idx if
        training_config.crossvalidation_idx else None,
        'max_steps': None,
        'regularizer_type': training_config.regularizer.type if
        training_config.regularizer.type else None,
        'weight_decay': training_config.regularizer.weight if
        training_config.regularizer.weight else 0,
        'log_summary_steps': training_config.log_summary_steps if
        training_config.log_summary_steps else 1,
        'warmup_steps': training_config.warmup_steps,
        'augment': dataset_config.augment if dataset_config.augment else False,
        'use_amp': training_config.use_amp if training_config.use_amp else False,
        'filter_data': dataset_config.filter_data if dataset_config.filter_data else False,
        'use_trt': training_config.use_trt if training_config.use_trt else False,
        'use_xla': training_config.use_xla if training_config.use_xla else False,
        'loss': training_config.loss if training_config.loss else "cross_entropy",
        'epochs': training_config.epochs if training_config.epochs else None,
        'pretrained_weights_file': model_config.pretrained_model_file if
        model_config.pretrained_model_file else None,
        # lr_scheduler is a message field, so HasField (not truthiness) is used.
        'lr_scheduler': training_config.lr_scheduler if
        training_config.HasField("lr_scheduler") else None,
        'unet_model': None,
        'key': None,
        'experiment_spec': None,
        'seed': experiment_spec.random_seed,
        'benchmark': False,
        'temp_dir': tempfile.mkdtemp(),
        'num_classes': None,
        'num_conf_mat_classes': None,
        'start_step': 0,
        'checkpoint_interval': training_config.checkpoint_interval if
        training_config.checkpoint_interval else 1,
        'model_json': None,
        'custom_objs': CUSTOM_OBJS,
        'load_graph': model_config.load_graph if model_config.load_graph else False,
        'remove_head': model_config.remove_head if model_config.remove_head else False,
        'buffer_size': training_config.buffer_size if
        training_config.buffer_size else None,
        'data_options': training_config.data_options if
        training_config.data_options else False,
        'weights_monitor': training_config.weights_monitor if
        training_config.weights_monitor else False,
        'visualize': training_config.visualizer.enabled if
        training_config.visualizer.enabled else False,
        'save_summary_steps': training_config.visualizer.save_summary_steps if
        training_config.visualizer.save_summary_steps else None,
        'infrequent_save_summary_steps': training_config.visualizer.infrequent_save_summary_steps if
        training_config.visualizer.infrequent_save_summary_steps else None,
        'enable_qat': model_config.enable_qat if model_config.enable_qat else False
    })
def save_tmp_json(tmp_model_obj):
    """Persist a serialized model JSON string to a temporary file.

    Args:
        tmp_model_obj (str): JSON string of the model graph.
    Returns:
        str: Path of the temporary json file that was written.
    """
    out_path = os.path.join(tempfile.mkdtemp(), "tmp_model.json")
    with open(out_path, 'w') as handle:
        handle.write(tmp_model_obj)
    return out_path
def get_weights_dir(results_dir):
    """Return (and lazily create) the directory for saved weights.

    Only the master process creates the directory under distributed training.

    Args:
        results_dir (str): Base results directory.
    Returns:
        str: Directory for saved model and weights.
    """
    weights_path = os.path.join(results_dir, 'weights')
    is_master = distribution.get_distributor().is_master()
    if is_master and not os.path.exists(weights_path):
        os.makedirs(weights_path)
    return weights_path
def set_random_seed(seed):
    """Seed Python's, NumPy's and TensorFlow's random generators.

    Args:
        seed (int): Seed to use.
    """
    # Same seeding order as before: Python, NumPy, then TensorFlow.
    for seeder in (random.seed, np.random.seed, tf.compat.v1.set_random_seed):
        seeder(seed)
def get_latest_tlt_model(results_dir):
    """Return the path of the newest .tlt checkpoint in a directory.

    Checkpoints are expected to follow the ``model.epoch-<N>.tlt`` naming.

    Args:
        results_dir (str): Directory holding the saved checkpoints.
    Returns:
        str or None: Path to the highest-epoch checkpoint, or None when the
        directory contains no .tlt files.
    """
    epochs = [int(fname.split('.')[1].split('-')[1])
              for fname in os.listdir(results_dir) if fname.endswith(".tlt")]
    if not epochs:
        return None
    return os.path.join(results_dir, "model.epoch-{}.tlt".format(max(epochs)))
def get_pretrained_model_path(model_file):
    """Resolve the pretrained model file to an absolute path.

    Relative paths are interpreted against the current working directory.

    Args:
        model_file (str): Name of the stored model file (.hdf5).
    Returns:
        str or None: Absolute path when model_file is non-empty, else None.
    """
    if not model_file:
        return None
    if os.path.isabs(model_file):
        return model_file
    return os.path.join(os.getcwd(), model_file)
def extract_pruned_model(model_file, key=None):
    """Extract the artifacts stored inside a pruned .tlt model.

    A .tlt file is an encrypted zip archive; a directory argument is treated
    as an already-extracted archive. The archive may hold Keras weights
    (.h5), model graphs (.json) and TF checkpoint meta files (.meta).

    Bug fix: the original assigned ``meta_files_final.append(...)`` (which
    returns None) back to ``meta_files_final`` in the no-step branch, so the
    function returned None instead of a list of checkpoint prefixes. The
    temporary decrypted zip is now also removed even when the archive
    contains no meta files.

    Args:
        model_file (str): Path to the pruned model (.tlt) or a directory.
        key (str): Encryption key used to decode a .tlt archive.
    Returns:
        tuple: (list of .h5 paths, list of .json paths,
                list with the checkpoint prefix, possibly empty).
    Raises:
        ValueError: If the archive cannot be unzipped (likely a wrong key).
        IOError: If the checkpoint inside the archive is corrupt.
    """
    encrypted = False
    if os.path.isdir(model_file):
        temp_dir = model_file
    else:
        encrypted = True
        temp_dir = tempfile.mkdtemp()
        logger.info("Loading weights from {}".format(model_file))
        os_handle, temp_zip_path = tempfile.mkstemp()
        os.close(os_handle)
        # Decrypt the checkpoint file.
        with open(model_file, 'rb') as encoded_file, open(temp_zip_path, 'wb') as tmp_zipf:
            encoding.decode(encoded_file, tmp_zipf, key.encode())
        # Load zip file and extract members to the tmp directory.
        try:
            with ZipFile(temp_zip_path, 'r') as zip_object:
                for member in zip_object.namelist():
                    zip_object.extract(member, path=temp_dir)
        except BadZipFile:
            raise ValueError("Please double check your encryption key.")
        except Exception:
            raise IOError("The last checkpoint file is not saved properly. \
                Please delete it and rerun the script.")
    model_names = [os.path.join(temp_dir, f) for f
                   in os.listdir(temp_dir) if f.endswith(".h5")]
    model_jsons = [os.path.join(temp_dir, f) for f
                   in os.listdir(temp_dir) if f.endswith(".json")]
    meta_files = [f for f in os.listdir(temp_dir) if f.endswith(".meta")]
    meta_files_final = []
    if meta_files:
        if "-" in meta_files[0]:
            # e.g. model.ckpt-1500.meta -> checkpoint prefix model.ckpt-1500
            step = int(meta_files[0].split('model.ckpt-')[-1].split('.')[0])
            meta_files_final.append(os.path.join(temp_dir, "model.ckpt-{}".format(step)))
        else:
            meta_files_final.append(os.path.join(temp_dir, "model.ckpt"))
    # Remove the temporary decrypted zip in every case.
    if encrypted:
        os.remove(temp_zip_path)
    return model_names, model_jsons, meta_files_final
def get_pretrained_ckpt(model_file, key=None, custom_objs=None):
    """Convert a pretrained model file into a TF checkpoint prefix.

    Supports pruned .tlt archives / directories (which may already carry a
    TF checkpoint, or a pruned Keras hdf5+json pair) and plain Keras
    .h5/.hdf5 files. Keras weights are loaded into the current session and
    re-saved as a TF checkpoint in a temporary directory.

    Args:
        model_file (string): Path to the stored model file.
        key (string): Decryption key for .tlt archives.
        custom_objs (dict): Custom Keras layer classes needed to rebuild
            the graph from json.

    Returns:
        tuple: (checkpoint path, model json path or None,
                bool flag that is True when the graph comes from a pruned model).

    Raises:
        NotImplementedError: For unsupported file extensions.
    """
    _, ext = os.path.splitext(model_file)
    pruned_graph = False
    if ext == ".tlt" or os.path.isdir(model_file):
        model_names, model_jsons, meta_files = extract_pruned_model(model_file, key=key)
        pruned_graph = True
        # This is loading from a tlt which is ckpt
        if len(meta_files) > 0:
            model_json = model_jsons[0] if len(model_jsons) > 0 else None
            return meta_files[0], model_json, pruned_graph
        # This is loading from pruned hdf5
        # NOTE(review): assumes the archive holds at least one .h5 and one
        # .json when no checkpoint meta is present — IndexError otherwise.
        model_name = model_names[0]
        model_json = model_jsons[0]
        tmp_ckpt_dir = tempfile.mkdtemp()
        tmp_ckpt_path = os.path.join(tmp_ckpt_dir, "model.ckpt")
        with open(model_json, 'r') as json_file:
            loaded_model_json = json_file.read()
        loaded_model = model_from_json(loaded_model_json, custom_objects=custom_objs)
        # load weights into new model
        loaded_model.load_weights(model_name)
        # Collect every Keras-created variable plus the global step and
        # snapshot them as a TF checkpoint.
        km_weights = tf.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES,
                                       scope=None)
        tf.compat.v1.train.get_or_create_global_step()
        global_step = tf.compat.v1.train.get_global_step()
        km_weights.append(global_step)
        saver = tf.train.Saver(km_weights)
        keras_session = keras.backend.get_session()
        save_path = saver.save(keras_session, tmp_ckpt_path)
        # Clear the session so the caller starts from a clean graph.
        keras.backend.clear_session()
        return save_path, model_json, pruned_graph
    if ext in (".h5", ".hdf5"):
        # Unpruned Keras weights: snapshot the current session's variables
        # into a fresh checkpoint (the weights themselves are loaded later
        # via pretrained_weights_file).
        tmp_ckpt_dir = tempfile.mkdtemp()
        tmp_ckpt_path = os.path.join(tmp_ckpt_dir, "model.ckpt")
        km_weights = tf.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES,
                                       scope=None)
        tf.compat.v1.train.get_or_create_global_step()
        global_step = tf.compat.v1.train.get_global_step()
        km_weights.append(global_step)
        saver = tf.train.Saver(km_weights)
        keras_session = keras.backend.get_session()
        save_path = saver.save(keras_session, tmp_ckpt_path)
        keras.backend.clear_session()
        return save_path, None, pruned_graph
    raise NotImplementedError("{0} file is not supported!".format(ext))
def setup_keras_backend(training_precision, is_training):
    """Configure Keras learning phase and float precision.

    Args:
        training_precision: (TrainingPrecision or None) Proto object with
            FP16/FP32 parameters. None leaves K.floatx() in its previous
            setting.
        is_training (bool): If True, put Keras in training mode.
    Raises:
        RuntimeError: If an unrecognized precision value is selected.
    """
    # Learning phase 1 = training mode; affects layers such as batch norm
    # and dropout that behave differently at train/test time.
    K.set_learning_phase(1 if is_training else 0)
    if training_precision is None:
        # Keep whatever floatx() was previously configured.
        return
    if training_precision.backend_floatx == training_precision.FLOAT32:
        K.set_floatx('float32')
    elif training_precision.backend_floatx == training_precision.FLOAT16:
        K.set_floatx('float16')
    else:
        raise RuntimeError('Invalid training precision selected')
def get_results_dir(results_dir):
    """Create the results directory if missing and return it.

    Only the master process creates the directory under distributed training.

    Args:
        results_dir (str): Base results directory.
    Returns:
        str: The results directory path.
    """
    missing = not os.path.exists(results_dir)
    if distribution.get_distributor().is_master() and missing:
        os.makedirs(results_dir)
    return results_dir
def build_target_class_list(data_class_config):
    """Build a list of TargetClasses based on the dataset class config.

    Classes sharing a ``mapping_class`` collapse onto the same train id;
    train ids are assigned contiguously (0..K-1) in ascending order of the
    label ids of the mapped-to classes.

    Arguments:
        data_class_config: DataClassConfig proto holding target_classes.
    Returns:
        list: TargetClass instances carrying label_id and calibrated train_id.
    """
    # Original label id of each class, keyed by class name.
    name_to_label_id = {tc.name: tc.label_id
                        for tc in data_class_config.target_classes}
    # Each class inherits the label id of the class it maps onto.
    name_to_mapped_id = {tc.name: name_to_label_id[tc.mapping_class]
                         for tc in data_class_config.target_classes}
    # Compress the surviving label ids into contiguous train ids.
    compressed = {label_id: train_id for train_id, label_id
                  in enumerate(sorted(set(name_to_mapped_id.values())))}
    name_to_train_id = {name: compressed[label_id]
                        for name, label_id in name_to_mapped_id.items()}
    target_classes = [TargetClass(tc.name, label_id=tc.label_id,
                                  train_id=name_to_train_id[tc.name])
                      for tc in data_class_config.target_classes]
    for target_class in target_classes:
        logger.info("Label Id {}: Train Id {}".format(target_class.label_id, target_class.train_id))
    return target_classes
def get_train_class_mapping(target_classes):
    """Map each train id to the list of original class names sharing it.

    Args:
        target_classes (list): TargetClass instances.
    Returns:
        dict: train_id -> list of class names mapped onto that id.
    """
    mapping = {}
    for target_class in target_classes:
        mapping.setdefault(target_class.train_id, []).append(target_class.name)
    return mapping
def compute_steps_per_epoch(num_samples, batch_size_per_gpu):
    """Compute steps per epoch based on data set size, minibatch size.

    and number of GPUs.

    Args:
        num_samples (int): Number of samples in a data set.
        batch_size_per_gpu (int): Minibatch size for a single GPU.
    Returns:
        Number of steps needed to iterate through the data set once
        (per processor, rounded up).
    """
    logger.info("The total number of training samples {} and the batch size per \
                GPU {}".format(num_samples, batch_size_per_gpu))
    # Round up so a partial final batch still counts as a step.
    steps_per_epoch, remainder = divmod(num_samples, batch_size_per_gpu)
    if remainder != 0:
        logger.info("Cannot iterate over exactly {} samples with a batch size of {}; "
                    "each epoch will therefore take one extra step.".format(
                        num_samples, batch_size_per_gpu))
        steps_per_epoch = steps_per_epoch + 1
    # Shard steps across distributed processes, again rounding up.
    number_of_processors = distribution.get_distributor().size()
    steps_per_epoch, remainder = divmod(steps_per_epoch, number_of_processors)
    if remainder != 0:
        # BUG FIX: the message previously formatted batch_size_per_gpu where
        # the processor count was clearly intended.
        logger.info("Cannot iterate over exactly {} steps per epoch with {} processors; "
                    "each processor will therefore take one extra step per epoch.".format(
                        steps_per_epoch, number_of_processors))
        steps_per_epoch = steps_per_epoch + 1
    logger.info("Steps per epoch taken: {}".format(steps_per_epoch))
    return steps_per_epoch
def compute_summary_logging_frequency(steps_per_epoch_per_gpu, num_logging_points=10):
    """Compute how many steps apart summaries should be logged.

    Args:
        steps_per_epoch_per_gpu (int): Number of steps per epoch for single GPU.
        num_logging_points (int): Desired number of logging points per epoch.
    Returns:
        int: Step interval between summary logging points (>= 1).
    """
    if steps_per_epoch_per_gpu < num_logging_points:
        # Fewer steps than logging points: log on every step.
        return 1
    return steps_per_epoch_per_gpu // num_logging_points
def build_regularizer(regularizer_config):
    """Create Keras kernel and bias regularizers from the proto config.

    Arguments:
        regularizer_config (regularizer_config_pb2.RegularizerConfig): Config
            for regularization.
    Returns:
        tuple: (kernel_regularizer, bias_regularizer); both None for NO_REG.
    Raises:
        ValueError: If the regularization weight is negative.
        NotImplementedError: For unrecognized regularizer types.
    """
    if regularizer_config.weight < 0.0:
        raise ValueError("TrainingConfig.regularization_weight must be >= 0")
    reg_type = regularizer_config.type
    if reg_type == RegularizerConfig.NO_REG:
        return None, None
    if reg_type == RegularizerConfig.L1:
        factory = keras.regularizers.l1
    elif reg_type == RegularizerConfig.L2:
        factory = keras.regularizers.l2
    else:
        raise NotImplementedError("The selected regularizer is not supported.")
    # Kernel and bias share the same regularizer type and weight.
    return factory(regularizer_config.weight), factory(regularizer_config.weight)
def get_num_unique_train_ids(target_classes):
    """Return the final number of classes used for training.

    Arguments:
        target_classes (list): TargetClass objects carrying train_id and
            label_id.
    Returns:
        int: Number of unique train ids (classes to be segmented).
    """
    return len({target.train_id for target in target_classes})
def update_train_params(params, num_training_examples):
    """Update the estimator with number epochs parameter.

    Derives steps_per_epoch and max_steps from the dataset size when
    max_steps was not given, and defaults the summary-saving intervals.

    Args:
        params (dict): Runtime parameter dict, mutated in place.
        num_training_examples (int): Number of samples in the training set.
    Returns:
        dict: The updated params.
    """
    if not params["max_steps"]:
        assert(params["epochs"]), "Num Epochs value needs to be provided."
        steps_per_epoch = compute_steps_per_epoch(num_training_examples,
                                                  params["batch_size"])
        params["steps_per_epoch"] = steps_per_epoch
        params["max_steps"] = steps_per_epoch * params["epochs"]
    if not params["save_summary_steps"]:
        # NOTE(review): min(1, steps_per_epoch) is always 1 for any positive
        # step count -- was max() intended here? Confirm before changing.
        params["save_summary_steps"] = min(1, params["steps_per_epoch"])
    if not params["infrequent_save_summary_steps"]:
        # NOTE(review): steps_per_epoch is only bound when max_steps was
        # unset above; this line raises NameError otherwise -- verify.
        params["infrequent_save_summary_steps"] = steps_per_epoch
    assert(params["save_summary_steps"] <= params["steps_per_epoch"]), \
        "Number of save_summary_steps should be less than number of steps per epoch."
    assert(params["infrequent_save_summary_steps"] <= params["max_steps"]), \
        "Number of infrequent_save_summary_steps should be less than total number of steps."
    return params
def get_custom_objs(model_arch=None):
    """Return the custom Keras layer classes required by the given arch.

    Args:
        model_arch (str): Model architecture name from the spec.
    Returns:
        dict: Layer-name to class mapping; only shufflenet needs custom layers.
    """
    if model_arch == "shufflenet":
        return {'GroupLayer': GroupLayer, 'ChannelShuffle': ChannelShuffle}
    return {}
def update_model_params(params, unet_model, input_model_file_name=None,
                        experiment_spec=None, key=None, results_dir=None,
                        target_classes=None, phase=None, model_json=None,
                        custom_objs=None):
    """Populate the runtime params dict with model-specific settings.

    Args:
        params (dict): Estimator runtime parameters, updated in place.
        unet_model: Unet model object.
        input_model_file_name (str): Path to pretrained weights.
        experiment_spec: The experiment proto.
        key (str): Encryption key for trained models.
        results_dir (str): Path to the result trained weights.
        target_classes (list): TargetClass objects carrying train/label ids.
        phase (str): Phase the parameters are being built for.
        model_json (str): Path to a model graph json, if any.
        custom_objs (dict): Unused here; custom objects are re-derived from
            the model arch in the spec.
    Returns:
        dict: The same params dict, updated.
    """
    num_unique_ids = get_num_unique_train_ids(target_classes)
    params.update({
        "unet_model": unet_model,
        "key": key,
        "pretrained_weights_file": input_model_file_name,
        "experiment_spec": experiment_spec,
        "model_dir": results_dir,
        "seed": experiment_spec.random_seed,
        "num_classes": num_unique_ids,
        "num_conf_mat_classes": num_unique_ids,
    })
    # Sanity check for the activation being sigmoid
    if params['activation'] == 'sigmoid' and params['num_classes'] > 2:
        warnings.warn("Sigmoid activation can only be used for binary segmentation. \
              Defaulting to softmax activation.")
        params['activation'] = 'softmax'
    if params['activation'] == 'sigmoid' and params['num_classes'] == 2:
        # Binary sigmoid segmentation uses a single output channel.
        params['num_classes'] = 1
    params['phase'] = phase
    params['model_json'] = model_json
    params['custom_objs'] = get_custom_objs(model_arch=experiment_spec.model_config.arch)
    return params
def get_init_ops():
    """Group all ops required for initialization into one op."""
    local_init = tf.local_variables_initializer()
    tables_init = tf.tables_initializer()
    # Dataset iterators register their initializers in this collection.
    iterator_inits = tf.get_collection('iterator_init')
    return tf.group(local_init, tables_init, *iterator_inits)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/model/utilities.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
"""Contains a set of utilities that allow building the UNet model."""
import keras
from keras import backend as K
from keras.layers import BatchNormalization
import numpy as np
import tensorflow as tf
keras.backend.set_image_data_format('channels_first')
def downsample_block(inputs, filters):
    """UNet downsample block.

    Applies two valid (unpadded) 3x3 ReLU convolutions, then 2x2 max pooling.

    Args:
        inputs (tf.Tensor): Tensor with inputs.
        filters (int): Number of filters in convolution.
    Return:
        Tuple of convolved ``inputs`` after and before downsampling.
    """
    x = inputs
    for _ in range(2):
        x = keras.layers.Conv2D(
            filters=filters,
            kernel_size=(3, 3),
            activation=tf.nn.relu)(x)
    pooled = keras.layers.MaxPool2D(pool_size=(2, 2), strides=2)(x)
    return pooled, x
def conv_kernel_initializer(shape, dtype=None):
    """Initialization for convolutional kernels.

    The main difference with tf.variance_scaling_initializer is that
    tf.variance_scaling_initializer uses a truncated normal with an uncorrected
    standard deviation, whereas here we use a normal distribution. Similarly,
    tf.contrib.layers.variance_scaling_initializer uses a truncated normal with
    a corrected standard deviation.

    Args:
        shape: shape of variable, as (kernel_h, kernel_w, in_filters, out_filters).
        dtype: dtype of variable; defaults to the current K.floatx().
    Returns:
        an initialization for the variable
    """
    # BUG FIX: the previous default `dtype=K.floatx()` was evaluated once at
    # import time, so a precision change made later (setup_keras_backend)
    # was silently ignored. Resolve the backend float type at call time.
    if dtype is None:
        dtype = K.floatx()
    kernel_height, kernel_width, _, out_filters = shape
    fan_out = int(kernel_height * kernel_width * out_filters)
    return tf.random.normal(
        shape, mean=0.0, stddev=np.sqrt(2.0 / fan_out), dtype=dtype)
def upsample_block(inputs, residual_input, filters):
    """UNet upsample block.

    Crops the skip tensor, concatenates it, applies two valid 3x3 ReLU
    convolutions and a stride-2 transposed convolution.

    Args:
        inputs (tf.Tensor): Tensor with inputs.
        residual_input (tf.Tensor): Residual (skip) input to crop and concat.
        filters (sequence of int): Three entries are used --
            filters[0]: filter count for the two 3x3 convolutions,
            filters[1]: symmetric crop size applied to ``residual_input``,
            filters[2]: filter count for the transposed convolution.
    Return:
        Convolved ``inputs`` after upsampling
    """
    # Crop the skip tensor so its spatial size matches `inputs` (valid convs
    # shrink the decoder path).
    cropped = keras.layers.Cropping2D(((filters[1], filters[1]), (filters[1],
                                      filters[1])), data_format="channels_first")(residual_input)
    concat_axis = 3 if K.image_data_format() == 'channels_last' else 1
    out = keras.layers.Concatenate(axis=concat_axis)([inputs, cropped])
    out = keras.layers.Conv2D(
        filters=filters[0],
        kernel_size=(3, 3),
        activation=tf.nn.relu)(out)
    out = keras.layers.Conv2D(
        filters=int(filters[0]),
        kernel_size=(3, 3),
        activation=tf.nn.relu)(out)
    out = keras.layers.Conv2DTranspose(
        filters=int(filters[2]),
        kernel_size=(3, 3),
        strides=(2, 2),
        padding='same',
        activation=tf.nn.relu)(out)
    return out
def conv2D_bn(x, filters, use_batchnorm, freeze_bn, kernel_size=(3, 3),
              kernel_initializer='glorot_uniform', activation=None):
    """Same-padded convolution, optional batch norm, then ReLU.

    Args:
        x (tf.Tensor): Tensor with inputs.
        filters (int): Number of filters in convolution.
        use_batchnorm (bool): Flag to set the batch normalization.
        freeze_bn (bool): Run the BN layer in inference mode (frozen stats).
        kernel_size (tuple): Size of filter.
        kernel_initializer (str): Initialization of layer.
        activation: Unused; the output always passes through ReLU.
    Return:
        Convolved ``inputs`` after conv (+ BN) + ReLU.
    """
    out = keras.layers.Conv2D(
        filters=filters,
        kernel_size=kernel_size,
        activation=None, padding='same',
        kernel_initializer=kernel_initializer)(x)
    if use_batchnorm:
        bn = keras.layers.BatchNormalization(axis=1)
        out = bn(out, training=False) if freeze_bn else bn(out)
    return keras.layers.Activation('relu')(out)
def upsample_block_other(inputs, residual_input, filters, use_batchnorm=False,
                         freeze_bn=False, initializer='glorot_uniform'):
    """UNet decoder upsample block: transpose conv, skip concat, 3x3 conv.

    Args:
        inputs (tf.Tensor): Tensor with inputs.
        residual_input (tf.Tensor): Skip tensor to concatenate, or None.
        filters (int): Number of filters for both convolutions.
        use_batchnorm (bool): Whether to insert BatchNormalization layers.
        freeze_bn (bool): Run BN layers in inference mode (frozen stats).
        initializer (str): Kernel initializer for the conv layers.
    Return:
        Convolved ``inputs`` after upsampling.
    """
    def _bn_relu(tensor):
        # Optional (possibly frozen) batch norm followed by ReLU.
        if use_batchnorm:
            bn = keras.layers.BatchNormalization(axis=1)
            tensor = bn(tensor, training=False) if freeze_bn else bn(tensor)
        return keras.layers.Activation('relu')(tensor)

    x = keras.layers.Conv2DTranspose(
        filters=int(filters),
        kernel_size=(4, 4),
        strides=(2, 2),
        padding='same',
        activation=None,
        data_format="channels_first",
        kernel_initializer=initializer)(inputs)
    channel_axis = 3 if K.image_data_format() == 'channels_last' else 1
    if residual_input is not None:
        x = keras.layers.Concatenate(axis=channel_axis)([x, residual_input])
    x = _bn_relu(x)
    x = keras.layers.Conv2D(
        filters=filters,
        kernel_size=(3, 3),
        activation=None, padding='same',
        kernel_initializer=initializer)(x)
    return _bn_relu(x)
def bottleneck(inputs, filters_up, mode):
    """UNet central (bottleneck) block.

    Upsamples the encoder output with a single stride-2 transposed
    convolution.

    Args:
        inputs (tf.Tensor): Tensor with inputs.
        filters_up (int): Number of filters in the transposed convolution.
        mode: Unused in this implementation; kept for signature
            compatibility with callers.
    Return:
        Convolved ``inputs`` after bottleneck.
    """
    out = inputs
    out = keras.layers.Conv2DTranspose(
        filters=filters_up,
        kernel_size=(3, 3),
        strides=(2, 2),
        padding='same',
        activation=tf.nn.relu)(out)
    return out
def output_block(inputs, residual_input, filters, n_classes):
    """UNet output head with a cropped skip connection.

    Crops the skip tensor to match ``inputs``, concatenates, applies two
    valid 3x3 ReLU convolutions, then a 1x1 convolution producing one
    channel per class (logits; no activation).

    Args:
        inputs (tf.Tensor): Tensor with inputs.
        residual_input (tf.Tensor): Residual (skip) input, spatially larger.
        filters (int): Number of filters for the 3x3 convolutions.
        n_classes (int): Number of output classes.
    Return:
        Convolved ``inputs`` with as many channels as classes.
    """
    # Symmetric crop so the skip tensor matches the spatial size of `inputs`.
    crop = (K.int_shape(residual_input)[2] - K.int_shape(inputs)[2]) // 2
    trimmed = keras.layers.Cropping2D(((crop, crop), (crop, crop)),
                                      data_format="channels_first")(residual_input)
    channel_axis = 3 if K.image_data_format() == 'channels_last' else 1
    x = keras.layers.Concatenate(axis=channel_axis)([inputs, trimmed])
    for _ in range(2):
        x = keras.layers.Conv2D(
            filters=filters,
            kernel_size=(3, 3),
            activation=tf.nn.relu)(x)
    return keras.layers.Conv2D(
        filters=n_classes,
        kernel_size=(1, 1),
        activation=None)(x)
def output_block_other(inputs, n_classes):
    """UNet output head.

    A single same-padded 3x3 convolution producing one channel per class
    (logits; no activation is applied here).

    Args:
        inputs (tf.Tensor): Tensor with inputs.
        n_classes (int): Number of output classes.
    Return:
        Convolved ``inputs`` with as many channels as classes.
    """
    return keras.layers.Conv2D(filters=n_classes,
                               kernel_size=(3, 3),
                               padding='same',
                               activation=None)(inputs)
def input_block(inputs, filters):
    """UNet input block.

    Two valid (unpadded) 3x3 ReLU convolutions followed by 2x2 max pooling.
    Structurally identical to ``downsample_block``; kept as a separate entry
    point for the network's first stage.

    Args:
        inputs (tf.Tensor): Tensor with inputs.
        filters (int): Number of filters in convolution.
    Return:
        Tuple of convolved ``inputs`` after and before downsampling.
    """
    conv = inputs
    for _ in range(2):
        conv = keras.layers.Conv2D(
            filters=filters,
            kernel_size=(3, 3),
            activation=tf.nn.relu)(conv)
    pooled = keras.layers.MaxPool2D(pool_size=(2, 2), strides=2)(conv)
    return pooled, conv
def conv_block(input_tensor, num_filters, use_batch_norm, freeze_bn,
               initializer='glorot_uniform'):
    """UNet conv block: two same-padded 3x3 convs, each optionally BN'd, ReLU'd.

    Args:
        input_tensor (tf.Tensor): Tensor with inputs.
        num_filters (int): Number of filters in convolution.
        use_batch_norm (bool): Whether to insert BatchNormalization layers.
        freeze_bn (bool): Run BN layers in inference mode (frozen stats).
        initializer (str): Kernel initializer for the conv layers.
    Return:
        Output tensor of the double-convolution stack.
    """
    def _maybe_bn(tensor):
        # Optional (possibly frozen) batch norm.
        if use_batch_norm:
            bn = BatchNormalization(axis=1)
            tensor = bn(tensor, training=False) if freeze_bn else bn(tensor)
        return tensor

    x = input_tensor
    for _ in range(2):
        x = keras.layers.Conv2D(num_filters, (3, 3), padding="same",
                                kernel_initializer=initializer)(x)
        x = _maybe_bn(x)
        x = keras.layers.Activation("relu")(x)
    return x
def encoder_block(input_tensor, num_filters, block_idx, use_batch_norm,
                  freeze_bn, initializer):
    """UNet encoder block: double convolution followed by 2x2 max pooling.

    Args:
        input_tensor (tf.Tensor): Tensor with inputs.
        num_filters (int): Number of filters in convolution.
        block_idx (int): Block index; currently unused in this implementation.
        use_batch_norm (bool): Whether conv_block inserts batch norm layers.
        freeze_bn (bool): Run batch norm layers in inference mode.
        initializer (str): Kernel initializer passed to conv_block.
    Return:
        Tuple of convolved ``inputs`` after and before downsampling.
    """
    encoder = conv_block(input_tensor, num_filters, use_batch_norm, freeze_bn,
                         initializer)
    encoder_pool = keras.layers.MaxPool2D((2, 2), strides=(2, 2),
                                          data_format='channels_first')(encoder)
    return encoder_pool, encoder
def decoder_block(input_tensor, concat_tensor, num_filters, block_idx, use_batch_norm,
                  freeze_bn, initializer):
    """UNet decoder block.

    Upsamples with a 2x2 transposed convolution, concatenates the skip
    tensor, then applies (optional BN) + ReLU followed by two 3x3
    convolutions, each with (optional BN) + ReLU.

    Args:
        input_tensor (tf.Tensor): Input feature map to upsample.
        concat_tensor (tf.Tensor): Encoder skip tensor to concatenate.
        num_filters (int): Number of convolution filters.
        block_idx (int): Index of this decoder block (kept for interface
            compatibility; not used internally).
        use_batch_norm (bool): Whether to use batch normalization.
        freeze_bn (bool): Whether to run batch norm in inference mode.
        initializer (str): Kernel initializer for the convolutions.

    Return:
        tf.Tensor: Decoded feature map.
    """
    def _maybe_bn_relu(tensor):
        # Optional BatchNormalization (axis 1) followed by ReLU.
        if use_batch_norm:
            if freeze_bn:
                tensor = BatchNormalization(axis=1)(tensor, training=False)
            else:
                tensor = BatchNormalization(axis=1)(tensor)
        return keras.layers.Activation("relu")(tensor)

    concat_axis = 3 if K.image_data_format() == 'channels_last' else 1
    decoder = keras.layers.Conv2DTranspose(num_filters, (2, 2),
                                           strides=(2, 2), padding='same')(input_tensor)
    decoder = keras.layers.Concatenate(axis=concat_axis)([concat_tensor, decoder])
    decoder = _maybe_bn_relu(decoder)
    for _ in range(2):
        decoder = keras.layers.Conv2D(num_filters, (3, 3), padding="same",
                                      kernel_initializer=initializer)(decoder)
        decoder = _maybe_bn_relu(decoder)
    return decoder
def Conv2DTranspose_block(input_tensor, filters, kernel_size=(3, 3),
                          transpose_kernel_size=(2, 2), upsample_rate=(2, 2),
                          initializer='glorot_uniform', skip=None,
                          use_batchnorm=False, freeze_bn=False):
    """UNet Conv2DTranspose block.

    Upsamples ``input_tensor`` with a transposed convolution, optionally
    concatenates a skip tensor, then runs a DoubleConv stack.

    Args:
        input_tensor (tf.Tensor): Input feature map.
        filters (int): Number of convolution filters.
        kernel_size (tuple): Kernel size for the DoubleConv convolutions.
        transpose_kernel_size (tuple): Kernel size for the transposed conv.
        upsample_rate (tuple): Stride of the transposed conv.
        initializer (str): Kernel initializer for the DoubleConv stage.
        skip (tf.Tensor): Optional skip tensor to concatenate.
        use_batchnorm (bool): Whether to use batch normalization.
        freeze_bn (bool): Whether to run batch norm in inference mode.

    Return:
        tf.Tensor: Upsampled, merged, and convolved feature map.
    """
    upsampled = keras.layers.Conv2DTranspose(filters, transpose_kernel_size,
                                             strides=upsample_rate, padding='same',
                                             data_format="channels_first")(input_tensor)
    merge_axis = 3 if K.image_data_format() == 'channels_last' else 1
    if skip is not None:
        upsampled = keras.layers.Concatenate(axis=merge_axis)([upsampled, skip])
    return DoubleConv(upsampled, filters, kernel_size, initializer=initializer,
                      use_batchnorm=use_batchnorm, freeze_bn=freeze_bn)
def DoubleConv(x, filters, kernel_size, initializer='glorot_uniform',
               use_batchnorm=False, freeze_bn=False):
    """UNet DoubleConv.

    Applies two bias-free same-padding convolutions, each followed by
    optional BatchNormalization and a ReLU activation.

    Args:
        x (tf.Tensor): Input feature map.
        filters (int): Number of convolution filters.
        kernel_size (int or tuple): Convolution kernel size.
        initializer (str): Kernel initializer for the convolutions.
        use_batchnorm (bool): Whether to apply BatchNormalization after
            each convolution.
        freeze_bn (bool): If True, run BatchNormalization in inference
            mode so its statistics are not updated during training.

    Return:
        tf.Tensor: Output of the second conv/BN/ReLU stage.
    """
    for _ in range(2):
        x = keras.layers.Conv2D(filters, kernel_size, padding='same',
                                use_bias=False, kernel_initializer=initializer)(x)
        # Fix: the first BatchNormalization used to be applied unconditionally,
        # ignoring ``use_batchnorm`` (the second stage already honored the
        # flag). Both stages are now gated consistently.
        if use_batchnorm:
            if freeze_bn:
                x = BatchNormalization(axis=1)(x, training=False)
            else:
                x = BatchNormalization(axis=1)(x)
        x = keras.layers.Activation('relu')(x)
    return x
def convTranspose2D_bn(x, filters, use_batchnorm, freeze_bn,
                       kernel_initializer='glorot_uniform', activation=None, padding='None'):
    """UNet convTranspose2D_bn.

    Perform a 4x4, stride-2, same-padding transposed convolution
    (channels-first) followed by optional BatchNormalization.

    Args:
        x (tf.Tensor): Tensor with inputs.
        filters (int): Number of filters in the transposed convolution.
        use_batchnorm (bool): Flag to apply batch normalization.
        freeze_bn (bool): If True, BatchNormalization runs in inference mode
            so its statistics are not updated during training.
        kernel_initializer (str): NOTE(review): currently unused — the
            transposed conv below relies on the layer's default initializer.
        activation: NOTE(review): currently unused — activation is
            hard-coded to None below.
        padding: NOTE(review): currently unused (and its default is the
            string 'None', not the ``None`` object) — padding is hard-coded
            to 'same' below.
    Return:
        Transposed convolution of ``x``, optionally batch-normalized.
    """
    x = keras.layers.Conv2DTranspose(
        filters=filters,
        kernel_size=(4, 4),
        strides=(2, 2),
        padding='same',
        activation=None,
        data_format="channels_first")(x)
    if use_batchnorm:
        if freeze_bn:
            x = keras.layers.BatchNormalization(axis=1, epsilon=1e-2)(x, training=False)
        else:
            x = keras.layers.BatchNormalization(axis=1, epsilon=1e-2)(x)
    return x
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/model/layers.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test UNet Model Building."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import keras
import pytest
from nvidia_tao_tf1.cv.unet.model.build_unet_model import build_model, select_model_proto
from nvidia_tao_tf1.cv.unet.model.utilities import build_target_class_list, get_num_unique_train_ids
from nvidia_tao_tf1.cv.unet.model.utilities import initialize
from nvidia_tao_tf1.cv.unet.proto.model_config_pb2 import ModelConfig
from nvidia_tao_tf1.cv.unet.spec_handler.spec_loader import load_experiment_spec
@pytest.mark.parametrize("arch, nlayers",
                         [
                             ('vanilla_unet_dynamic', 1),
                             ('efficientnet_b0', 1),
                             ('resnet', 10),
                             ('resnet', 18),
                             ('resnet', 34),
                             ('resnet', 50),
                             ('resnet', 101),
                             ('vgg', 16),
                             ('vgg', 19)
                         ])
def test_unet(arch, nlayers):
    """Build UNet for each backbone/depth and check the output tensor shape."""
    file_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    spec_path = os.path.join(file_path, '../../unet/experiment_specs/test_isbi.txt')
    experiment_spec = load_experiment_spec(spec_path, merge_from_default=False)
    for init in [ModelConfig.KernelInitializer.HE_UNIFORM,
                 ModelConfig.KernelInitializer.HE_NORMAL,
                 ModelConfig.KernelInitializer.GLOROT_UNIFORM]:
        # Initialize the environment
        initialize(experiment_spec)
        target_classes = build_target_class_list(
            experiment_spec.dataset_config.data_class_config)
        model_config = select_model_proto(experiment_spec)
        model_config.arch = arch
        model_config.num_layers = nlayers
        model_config.initializer = init
        for use_batch_norm in [True, False]:
            # Fix: previously iterated over [False, False], which built the
            # same configuration twice and never exercised freeze_bn=True.
            for freeze_bn in [True, False]:
                model_config.use_batch_norm = use_batch_norm
                model_config.freeze_bn = freeze_bn
                unet_model = build_model(m_config=model_config,
                                         target_class_names=target_classes)
                img_height, img_width, img_channels = \
                    model_config.model_input_height, \
                    model_config.model_input_width, \
                    model_config.model_input_channels
                num_classes = get_num_unique_train_ids(target_classes)
                unet_model.construct_model(input_shape=(img_channels,
                                                        img_height, img_width))
                # Final layer must emit one channel per class at input resolution.
                assert unet_model.keras_model.layers[-1].output_shape[1:] == \
                    (num_classes, img_height, img_width)
                unet_model.keras_model.summary()
                keras.backend.clear_session()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/model/tests/test_model_builder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Folder defining spec handling module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/spec_handler/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Load an experiment spec file to run GridBox training, evaluation, pruning."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
from google.protobuf.text_format import Merge as merge_text_proto
import nvidia_tao_tf1.cv.unet.proto.experiment_pb2 as experiment_pb2
logger = logging.getLogger(__name__)
def load_proto(spec_path, proto_buffer, default_spec_path=None, merge_from_default=True):
    """Load spec from file and merge with given proto_buffer instance.

    Args:
        spec_path (str): Location of a file containing the custom spec proto.
        proto_buffer (pb2): Protocol buffer instance to be loaded.
        default_spec_path (str): Location of the default spec, used when
            ``merge_from_default`` is True.
        merge_from_default (bool): Whether to load the default spec first;
            if False, ``spec_path`` must be provided.

    Returns:
        proto_buffer (pb2): Protocol buffer instance updated with the spec.
    """
    def _merge_file(path, pb2):
        # Merge a text-format proto file into ``pb2`` in place.
        if not os.path.exists(path):
            raise IOError("Specfile not found at: {}".format(path))
        with open(path, "r") as spec_file:
            merge_text_proto(spec_file.read(), pb2)

    if merge_from_default:
        assert default_spec_path, \
            "default spec path has to be defined if merge_from_default is enabled"
        # Start from the default spec; the custom spec (if any) overlays it below.
        _merge_file(default_spec_path, proto_buffer)
    else:
        assert spec_path, "spec_path has to be defined, if merge_from_default is disabled"
    if spec_path:
        logger.info("Merging specification from %s", spec_path)
        _merge_file(spec_path, proto_buffer)
    return proto_buffer
def load_experiment_spec(spec_path=None, merge_from_default=True):
    """Load experiment spec from a .txt file and return an experiment_pb2.Experiment object.

    Args:
        spec_path (str): Location of a file containing the custom experiment spec proto.
        merge_from_default (bool): Whether to start from the packaged default spec;
            if False, ``spec_path`` must be provided.

    Returns:
        experiment_spec: Protocol buffer instance of type experiment_pb2.Experiment.
    """
    spec = experiment_pb2.Experiment()
    # The default spec ships with the package under experiment_specs/.
    package_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    default_spec_path = os.path.join(package_root, 'experiment_specs/default_spec.txt')
    return load_proto(spec_path, spec, default_spec_path, merge_from_default)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/spec_handler/spec_loader.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""IVA Unet experiment specs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/experiment_specs/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class to export trained .tlt models to etlt file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import logging
import os
import random
import tempfile
import onnx
from six.moves import xrange
import tensorflow as tf
from tensorflow.compat.v1 import GraphDef
import tf2onnx
from tqdm import tqdm
from nvidia_tao_tf1.core.export._onnx import keras_to_onnx
from nvidia_tao_tf1.core.export._uff import keras_to_pb
from nvidia_tao_tf1.cv.common.export.keras_exporter import KerasExporter as Exporter
from nvidia_tao_tf1.cv.common.export.tensorfile import TensorFile
from nvidia_tao_tf1.cv.unet.export.unet_ds_config import UnetDSConfig
from nvidia_tao_tf1.cv.unet.model.model_io import check_for_quantized_layers, load_keras_model
from nvidia_tao_tf1.cv.unet.model.utilities import build_target_class_list, get_train_class_mapping
from nvidia_tao_tf1.cv.unet.model.utilities import initialize, initialize_params
from nvidia_tao_tf1.cv.unet.spec_handler.spec_loader import load_experiment_spec
from nvidia_tao_tf1.cv.unet.utils.data_loader import Dataset
logger = logging.getLogger(__name__)
class UNetExporter(Exporter):
    """Exporter class to export a trained UNet model."""

    def __init__(self, model_path=None,
                 key=None,
                 data_type="fp32",
                 strict_type=False,
                 experiment_spec_path="",
                 backend="onnx",
                 **kwargs):
        """Instantiate the UNet exporter to export a trained UNet .tlt model.

        Args:
            model_path(str): Path to the UNet model file.
            key (str): Key to decode the model.
            data_type (str): Backend data-type for the optimized TensorRT engine.
            strict_type(bool): Apply TensorRT strict_type_constraints or not for INT8 mode.
            experiment_spec_path (str): Path to UNet experiment spec file.
            backend (str): Type of intermediate backend parser to be instantiated.
        """
        super(UNetExporter, self).__init__(model_path=model_path,
                                           key=key,
                                           data_type=data_type,
                                           strict_type=strict_type,
                                           backend=backend,
                                           **kwargs)
        self.experiment_spec_path = experiment_spec_path
        assert os.path.isfile(self.experiment_spec_path), \
            "Experiment spec file not found at {}.".format(self.experiment_spec_path)
        self.experiment_spec = load_experiment_spec(self.experiment_spec_path,
                                                    merge_from_default=False)
        self.keras_model, self.custom_objs = load_keras_model(self.experiment_spec,
                                                              self.model_path,
                                                              export=True, key=self.key)
        # Fix: these two attributes were previously assigned twice in a row;
        # the redundant duplicate assignments have been removed.
        self.enable_qat = self.experiment_spec.model_config.enable_qat
        self.model_arch = self.experiment_spec.model_config.arch
        self.export_route = "pb2onnx"
        initialize(self.experiment_spec)
        # Initialize Params
        self.params = initialize_params(self.experiment_spec)

    def load_model(self, backend="onnx"):
        """Simple function to load the UNet Keras model."""
        model = self.keras_model
        if check_for_quantized_layers(model):
            # QAT-trained models carry quantization nodes; extract their
            # tensor scales for INT8 deployment.
            logger.info("INT8 quantization using QAT")
            model, self.tensor_scale_dict = self.extract_tensor_scale(model, backend)
        return model

    def set_keras_backend_dtype(self):
        """skip."""
        pass

    def pb_to_onnx(self, input_filename, output_filename, input_node_names,
                   output_node_names, target_opset=None):
        """Convert a TensorFlow model to ONNX.

        The input model needs to be passed as a frozen Protobuf file.
        The exported ONNX model may be parsed and optimized by TensorRT.

        Args:
            input_filename (str): path to protobuf file.
            output_filename (str): file to write exported model to.
            input_node_names (list of str): list of model input node names as
                returned by model.layers[some_idx].get_output_at(0).name.split(':')[0].
            output_node_names (list of str): list of model output node names as
                returned by model.layers[some_idx].get_output_at(0).name.split(':')[0].
            target_opset (int): Target opset version to use, default=<default opset for
                the current keras2onnx installation>

        Returns:
            tuple<in_tensor_name(s), out_tensor_name(s):
            in_tensor_name(s): The name(s) of the input nodes. If there is
            only one name, it will be returned as a single string, otherwise
            a list of strings.
            out_tensor_name(s): The name(s) of the output nodes. If there is only one name,
            it will be returned as a single string, otherwise a list of strings.
        """
        graphdef = GraphDef()
        with tf.gfile.GFile(input_filename, "rb") as frozen_pb:
            graphdef.ParseFromString(frozen_pb.read())
        if not isinstance(input_node_names, list):
            input_node_names = [input_node_names]
        if not isinstance(output_node_names, list):
            output_node_names = [output_node_names]
        # The ONNX parser requires tensors to be passed in the node_name:port_id format.
        # Since we reset the graph below, we assume input and output nodes have a single port.
        input_node_names = ["{}:0".format(node_name) for node_name in input_node_names]
        output_node_names = ["{}:0".format(node_name) for node_name in output_node_names]
        tf.reset_default_graph()
        with tf.Graph().as_default() as tf_graph:
            tf.import_graph_def(graphdef, name="")
        onnx_graph = tf2onnx.tfonnx.process_tf_graph(
            tf_graph,
            input_names=input_node_names,
            output_names=output_node_names,
            continue_on_error=True,
            verbose=True,
            opset=target_opset,
        )
        onnx_graph = tf2onnx.optimizer.optimize_graph(onnx_graph)
        model_proto = onnx_graph.make_model("test")
        with open(output_filename, "wb") as f:
            f.write(model_proto.SerializeToString())
        # Reload and check ONNX model.
        onnx_model = onnx.load(output_filename)
        onnx.checker.check_model(onnx_model)
        # Return a string instead of a list if there is only one input or output.
        if len(input_node_names) == 1:
            input_node_names = input_node_names[0]
        if len(output_node_names) == 1:
            output_node_names = output_node_names[0]
        return input_node_names, output_node_names

    def optimize_onnx(self, onnx_path):
        """"Function to optimize the ONNX by removing softmax."""
        model = onnx.load(onnx_path)
        copied_model = copy.deepcopy(model)
        graph = copied_model.graph
        if self.params.activation == "sigmoid":
            # Sigmoid head is deployed as-is; no graph surgery needed.
            pass
        else:
            softmax_1 = [node for node in graph.node if 'softmax' in node.name]
            # Record the input node names before removing the node
            softmax_1_inp_node = softmax_1[0].input
            graph.node.remove(softmax_1[0])
            graph.output.remove(graph.output[0])
            # Create ArgMax node
            # Input shape is Nxhxwx2. Output shape is Nxhxwx1.
            output_tensor = onnx.helper.make_tensor_value_info(
                'argmax_1', onnx.TensorProto.INT64,
                ('N', self.experiment_spec.model_config.model_input_height,
                 self.experiment_spec.model_config.model_input_width, 1))
            graph.output.append(output_tensor)
            # Last axis - input tensor shape is Nx544x960x2.
            argmax_node = onnx.helper.make_node(
                'ArgMax', softmax_1_inp_node, ['argmax_1'], axis=-1, keepdims=1)
            graph.node.append(argmax_node)
        onnx.checker.check_model(copied_model)
        logger.info('New input/output')
        logger.info(graph.input)
        logger.info(graph.output)
        onnx.save(copied_model, onnx_path)
        output_names = [node.name for node in copied_model.graph.output]
        return output_names

    def convert_to_onnx(self, model, export_route, tmp_onnx_file):
        """Function to model to ONNX based on the export_route."""
        if export_route == "keras2onnx":
            keras_to_onnx(model,
                          tmp_onnx_file,
                          custom_objects=self.custom_objs,
                          target_opset=self.target_opset)
        elif export_route == "pb2onnx":
            if self.target_opset != 11:
                logger.warning("UNet uses target opset of 11 by default."
                               "Overriding the provided opset {} to 11.".format(self.target_opset))
            # The pb -> onnx route always forces opset 11.
            target_opset = 11
            output_pb_filename = tmp_onnx_file.replace(".onnx", ".pb")
            in_tensor_names, out_tensor_names, __ = keras_to_pb(model,
                                                                output_pb_filename,
                                                                output_node_names=None,
                                                                custom_objects=self.custom_objs)
            (_, _) = self.pb_to_onnx(output_pb_filename,
                                     tmp_onnx_file,
                                     in_tensor_names,
                                     out_tensor_names,
                                     target_opset)
            os.remove(output_pb_filename)

    def save_exported_file(self, model, output_file_name):
        """save an ONNX file.

        Args:
            model (keras.model.Model): Decoded keras model to be exported.
            output_file_name (str): Path to the output file.

        Returns:
            output_file_name (str): Path to the saved ONNX file.
        """
        if self.backend == "onnx":
            # TO DO: Remove the below line. It is to experiment the trtexec.
            # generate encoded onnx model with empty string as input node name
            self.convert_to_onnx(model, self.export_route, output_file_name)
            output_names = self.optimize_onnx(output_file_name)
            # Update output names here for the modified graph
            self.output_node_names = output_names
            tf.reset_default_graph()
            logger.info("Converted model was saved into %s", output_file_name)
            return output_file_name
        raise NotImplementedError("Invalid backend provided. {}".format(self.backend))

    def set_input_output_node_names(self):
        """Set input output node names."""
        model = self.load_model()
        output_name = [node.op.name for node in model.outputs]
        input_name = [node.op.name for node in model.inputs]
        output_node_name = output_name[0].split("/")[0]
        input_node_name = input_name[0].split("/")[0]
        self.output_node_names = [output_node_name]
        self.input_node_names = [input_node_name]

    def set_data_preprocessing_parameters(self, input_dims, image_mean):
        """Set data pre-processing parameters for the int8 calibration."""
        num_channels = input_dims[0]
        if num_channels == 3:
            means = [127.5, 127.5, 127.5]
        else:
            means = [127.5]
        # Scale/mean map pixels into [-1, 1]; channels are flipped (RGB<->BGR).
        self.preprocessing_arguments = {"scale": 1.0 / 127.5,
                                        "means": means,
                                        "flip_channel": True}

    def generate_ds_config(self, input_dims, num_classes=None):
        """Generate Deepstream config element for the exported model."""
        if input_dims[0] == 1:
            color_format = "l"
        else:
            color_format = "bgr" if self.preprocessing_arguments["flip_channel"] else "rgb"
        kwargs = {
            "data_format": self.data_format,
            "backend": self.backend,
            # Setting this to 2 by default since it is semantic segmentation
        }
        if self.params.activation == "sigmoid":
            kwargs["network_type"] = 2
            kwargs["output_tensor_meta"] = 0
        else:
            kwargs["network_type"] = 100
            # This is set to output the final inference image
            kwargs["output_tensor_meta"] = 1
        if num_classes:
            kwargs["num_classes"] = num_classes
        ds_config = UnetDSConfig(
            self.preprocessing_arguments["scale"],
            self.preprocessing_arguments["means"],
            input_dims,
            color_format,
            self.key,
            segmentation_threshold=0.0,
            output_blob_names=self.output_node_names[0],
            segmentation_output_order=1,
            **kwargs
        )
        return ds_config

    def get_class_labels(self):
        """Get list of class labels to serialize to a labels.txt file."""
        if self.experiment_spec is None:
            raise AttributeError(
                "Experiment spec wasn't loaded. To get class labels "
                "please provide the experiment spec file using the -e "
                "option.")
        target_labels = []
        target_classes = build_target_class_list(
            self.experiment_spec.dataset_config.data_class_config)
        train_id_name_mapping = get_train_class_mapping(target_classes)
        num_classes = len(train_id_name_mapping)
        # Labels are emitted in train-id order so the file lines up with outputs.
        for class_id in range(num_classes):
            target_labels.append(train_id_name_mapping[class_id][0])
        return target_labels

    def generate_tensor_file(self, data_file_name,
                             calibration_images_dir,
                             input_dims, n_batches=10,
                             batch_size=1, image_mean=None):
        """Generate calibration Tensorfile for int8 calibrator.

        This function generates a calibration tensorfile from a directory of images, or dumps
        n_batches of random numpy arrays of shape (batch_size,) + (input_dims).

        Args:
            data_file_name (str): Path to the output tensorfile to be saved.
            calibration_images_dir (str): Path to the images to generate a tensorfile from.
            input_dims (list): Input shape in CHW order.
            n_batches (int): Number of batches to be saved.
            batch_size (int): Number of images per batch.
            image_mean (list): Image mean per channel.

        Returns:
            No explicit returns.
        """
        # Initialize the environment
        initialize(self.experiment_spec)
        # Initialize Params
        params = initialize_params(self.experiment_spec)
        params["experiment_spec"] = self.experiment_spec
        target_classes = build_target_class_list(
            self.experiment_spec.dataset_config.data_class_config)
        dataset = Dataset(
            batch_size=batch_size,
            params=params,
            target_classes=target_classes)
        # Preparing the list of images to be saved.
        num_images = n_batches * batch_size
        valid_image_ext = ['jpg', 'jpeg', 'png']
        image_list = dataset.image_names_list
        if not len(image_list) > 0:
            # Fall back to the calibration image directory when the dataset
            # itself does not supply image paths.
            if os.path.exists(calibration_images_dir):
                image_list = [os.path.join(calibration_images_dir, image)
                              for image in os.listdir(calibration_images_dir)
                              if image.split('.')[-1] in valid_image_ext]
        if len(image_list) > 0:
            if len(image_list) < num_images:
                raise ValueError('Not enough number of images provided:'
                                 ' {} < {}'.format(len(image_list), num_images))
            image_idx = random.sample(xrange(len(image_list)), num_images)
            self.set_data_preprocessing_parameters(input_dims, image_mean)
            # Writing out processed dump.
            with TensorFile(data_file_name, 'w') as f:
                for chunk in tqdm(image_idx[x:x+batch_size] for x in xrange(0, len(image_idx),
                                                                            batch_size)):
                    dump_data = self.prepare_chunk(chunk, image_list,
                                                   image_width=input_dims[2],
                                                   image_height=input_dims[1],
                                                   channels=input_dims[0],
                                                   batch_size=batch_size,
                                                   **self.preprocessing_arguments)
                    f.write(dump_data)
            # Fix: removed a stray no-op ``f.closed`` statement; the ``with``
            # block already closes the file.
        else:
            # Calibration images are not present in cal image dir or experiment spec
            logger.info("Generating a tensorfile with random tensor images. This may work well as "
                        "a profiling tool, however, it may result in inaccurate results at "
                        "inference. Please generate a tensorfile using the tlt-int8-tensorfile, "
                        "or provide a custom directory of images for best performance.")
            self.generate_random_tensorfile(data_file_name,
                                            input_dims,
                                            n_batches=n_batches,
                                            batch_size=batch_size)

    def get_input_dims_from_model(self, model=None):
        """Read input dimensions from the model.

        Args:
            model (keras.models.Model): Model to get input dimensions from.

        Returns:
            input_dims (tuple): Input dimensions.
        """
        if model is None:
            raise IOError("Invalid model object.")
        input_dims = model.layers[1].input_shape[1:]
        return input_dims
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/export/unet_exporter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to export a trained UNet model to an ETLT file for deployment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/export/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration unet derived class for a DeepStream graph."""
from nvidia_tao_tf1.cv.common.types.base_ds_config import BaseDSConfig
class UnetDSConfig(BaseDSConfig):
    """Configuration element for an nvinfer ds plugin."""

    def __init__(self, *args, segmentation_threshold=None, output_blob_names=None,
                 segmentation_output_order=None, **kwargs):
        """Init function.

        Args:
            segmentation_threshold (float): Threshold used to classify a mask.
            output_blob_names (str): Output name of the model graph.
            segmentation_output_order: Output order flag (1 when the output
                is channel last).
            *args, **kwargs: Forwarded unchanged to ``BaseDSConfig``.
        """
        super(UnetDSConfig, self).__init__(*args, **kwargs)
        self.segmentation_threshold = segmentation_threshold
        self.output_blob_names = output_blob_names
        self.segmentation_output_order = segmentation_output_order

    def get_config(self):
        """Generate config elements."""
        # Extend the base config with the UNet-specific segmentation keys.
        config_dict = super().get_config()
        unet_entries = {
            "segmentation-threshold": self.segmentation_threshold,
            "output-blob-names": self.output_blob_names,
            "segmentation-output-order": self.segmentation_output_order,
        }
        config_dict.update(unet_entries)
        return config_dict
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/unet/export/unet_ds_config.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""YOLOv3 Loss for training."""
import tensorflow as tf
from nvidia_tao_tf1.cv.common.losses.base_loss import BaseLoss
from nvidia_tao_tf1.cv.ssd.utils.box_utils import iou
class YOLOv3Loss(BaseLoss):
    '''
    YOLOv3 Loss class.

    See details here: https://arxiv.org/pdf/1506.02640.pdf
    '''

    def __init__(self,
                 lambda_coord=5.0,
                 lambda_no_obj=50.0,
                 lambda_cls=1.0,
                 ignore_threshold=0.7):
        '''
        Loss init function.

        NOTE: obj classification loss weight for positive sample is fixed at 1

        Args:
            lambda_coord: coord (bbox regression) loss weight
            lambda_no_obj: obj classification loss weight for negative sample
            lambda_cls: classification loss weight for positive sample
            ignore_threshold: iou threshold to ignore boxes when calculating negative obj loss
        '''
        self.lambda_coord = lambda_coord
        self.lambda_no_obj = lambda_no_obj
        self.lambda_cls = lambda_cls
        self.ignore_threshold = ignore_threshold

    def decode_bbox(self, cy, cx, h, w):
        """Decode bbox from (cy, cx, h, w) format to (xmin, ymin, xmax, ymax) format."""
        x_min = cx - 0.5 * w
        x_max = cx + 0.5 * w
        y_min = cy - 0.5 * h
        y_max = cy + 0.5 * h
        # Stack along the last axis so each box becomes a 4-vector.
        return tf.stack([x_min, y_min, x_max, y_max], -1)

    def compute_loss(self, y_true, y_pred):
        '''
        Compute the loss of the YOLO v3 model prediction against the ground truth.

        Arguments:
            y_true (tensor): array of `(batch_size, #boxes,
                [cy, cx, h, w, objectness, cls])`
            y_pred (tensor): array of `(batch_size, #boxes,
                [cy, cx, ph, pw, step_y, step_x, pred_y, pred_x, pred_h, pred_w, object, cls...])`

        Returns:
            A scalar, the total multitask loss for classification and localization.
        '''
        # Regression target for the center: offset of the GT center from the
        # anchor center, measured in units of the grid step.
        gt_t_yx = tf.truediv(y_true[:, :, 0:2] - y_pred[:, :, 0:2], y_pred[:, :, 4:6])
        # limit w and h within 0.01 to 100 times of anchor size to avoid gradient explosion.
        # if boxes' true shift is not within this range, the anchor is very very badly chosen...
        gt_t_hw = tf.log(tf.clip_by_value(tf.truediv(y_true[:, :, 2:4], y_pred[:, :, 2:4]),
                                          1e-2, 1e2))
        # assign more loc loss weights to smaller boxes
        loss_scale = 2.0 - y_true[:, :, 2] * y_true[:, :, 3]
        # Raw center predictions are squashed to (0, 1) with a sigmoid.
        pred_t_yx = tf.sigmoid(y_pred[:, :, 6:8])
        loc_loss = self.L2_loss(tf.concat([gt_t_yx, gt_t_hw], axis=-1),
                                tf.concat([pred_t_yx, y_pred[:, :, 8:10]], axis=-1))
        # Localization loss only counts for positive (objectness == 1) boxes.
        loc_loss = loc_loss * y_true[:, :, 4] * loss_scale
        obj_loss = self.bce_loss(y_true[:, :, 4:5], y_pred[:, :, 10:11])

        def max_iou_fn(x):
            # x[0] is y_true (#bbox, ...), x[1] is pred_yx (#bbox, ...), x[2] is pred_hw
            # we need to calculate neutral bboxes.
            valid_bbox = tf.boolean_mask(x[0], x[0][:, 4])
            # shape (batch_size, #boxes, 4)
            valid_bbox = self.decode_bbox(valid_bbox[:, 0], valid_bbox[:, 1],
                                          valid_bbox[:, 2], valid_bbox[:, 3])

            pred_bbox = self.decode_bbox(x[1][:, 0], x[1][:, 1],
                                         x[2][:, 0], x[2][:, 1])
            return tf.reduce_max(iou(pred_bbox, valid_bbox), -1)

        # Decode predictions back to absolute (cy, cx) and (h, w).
        pred_yx = pred_t_yx * y_pred[:, :, 4:6] + y_pred[:, :, 0:2]
        pred_hw = tf.exp(y_pred[:, :, 8:10]) * y_pred[:, :, 2:4]
        # Per-image max IoU of every predicted box against the GT boxes.
        max_iou = tf.map_fn(max_iou_fn, (y_true, pred_yx, pred_hw), dtype=tf.float32)
        is_neg = tf.cast(tf.less(max_iou, self.ignore_threshold), tf.float32)

        # Do not count positive box as negative even it's iou is small!
        is_neg = (1. - y_true[:, :, 4]) * is_neg

        # we need to avoid divide by zero errors
        pos_box_count = tf.maximum(tf.reduce_sum(y_true[:, :, 4]), 1e-5)
        neg_box_count = tf.maximum(tf.reduce_sum(is_neg), 1e-5)
        loc_loss = tf.truediv(tf.reduce_sum(loc_loss), pos_box_count)
        obj_pos_loss = tf.truediv(tf.reduce_sum(obj_loss * y_true[:, :, 4]), pos_box_count)
        obj_neg_loss = tf.truediv(tf.reduce_sum(obj_loss * is_neg), neg_box_count)
        cls_loss = tf.truediv(tf.reduce_sum(self.bce_loss(y_true[:, :, 5:],
                                                          y_pred[:, :, 11:]) * y_true[:, :, 4]),
                              pos_box_count)

        # Weighted sum of localization, objectness (pos/neg) and class losses.
        total_loss = self.lambda_coord * loc_loss + obj_pos_loss + self.lambda_cls * cls_loss + \
            self.lambda_no_obj * obj_neg_loss

        return total_loss
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/losses/yolo_loss.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/losses/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test yolo loss."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
# Forcing the default GPU to be index 0
# because TensorFlow tries to set idx to 1
# with an XLA error.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import tensorflow as tf
from nvidia_tao_tf1.cv.yolo_v3.losses.yolo_loss import YOLOv3Loss
def convert_pred_to_true_match(y_pred):
    """Construct a ground-truth tensor that matches `y_pred` exactly.

    Feeding the pair to the loss should therefore produce a (numerically) zero loss.
    """
    matched_yx = y_pred[:, :, 0:2] + tf.sigmoid(y_pred[:, :, 6:8]) * y_pred[:, :, 4:6]
    matched_hw = y_pred[:, :, 2:4] * tf.exp(y_pred[:, :, 8:10])
    matched_obj = tf.sigmoid(y_pred[:, :, 10:11])
    matched_cls = tf.sigmoid(y_pred[:, :, 11:])
    # GT layout: (cy, cx, h, w, objectness, cls...)
    return tf.concat([matched_yx, matched_hw, matched_obj, matched_cls], -1)
def test_loss_zero():
    # Use large loss coefficients so that any mismatch would be amplified.
    yolo_loss = YOLOv3Loss(10, 1000, 10, 1.)
    y_pred = tf.constant([[[100.0, 130.0, 2.0, 2.5, 3.0, 3.5, 1e10,
                            -5.0, 0.3, 1.5, 1e99, 1e99, -1e99, -1e99]]])
    y_true = convert_pred_to_true_match(y_pred)
    with tf.Session() as sess:
        total = sess.run(yolo_loss.compute_loss(y_true, y_pred))
        assert abs(total) < 1e-5
def test_loss_nonzero_x():
    yolo_loss = YOLOv3Loss(10, 1000, 10, 1.)
    matched_pred = tf.constant([[[100.0, 130.0, 2.0, 2.5, 3.0, 3.5, 1e10,
                                  -5.0, 0.3, 1.5, 1e99, 1e99, -1e99, -1e99]]])
    y_true = convert_pred_to_true_match(matched_pred)
    # Perturb pred_x only.
    y_pred = tf.constant([[[100.0, 130.0, 2.0, 2.5, 3.0, 3.5, 1e10,
                            -6, 0.3, 1.5, 1e99, 1e99, -1e99, -1e99]]])
    # With a zero coord weight, the loss should vanish again.
    no_coord_loss = YOLOv3Loss(0, 1000, 10, 1.)
    with tf.Session() as sess:
        assert abs(sess.run(yolo_loss.compute_loss(y_true, y_pred)) + 0.0025146198) < 1e-5
        assert abs(sess.run(no_coord_loss.compute_loss(y_true, y_pred))) < 1e-5
def test_loss_nonzero_y():
    yolo_loss = YOLOv3Loss(10, 1000, 10, 1.)
    matched_pred = tf.constant([[[100.0, 130.0, 2.0, 2.5, 3.0, 3.5, 1e10,
                                  -5.0, 0.3, 1.5, 1e99, 1e99, -1e99, -1e99]]])
    y_true = convert_pred_to_true_match(matched_pred)
    # Perturb pred_y only.
    y_pred = tf.constant([[[100.0, 130.0, 2.0, 2.5, 3.0, 3.5, 1.0,
                            -5.0, 0.3, 1.5, 1e99, 1e99, -1e99, -1e99]]])
    # With a zero coord weight, the loss should vanish again.
    no_coord_loss = YOLOv3Loss(0, 1000, 10, 1.)
    with tf.Session() as sess:
        assert abs(sess.run(yolo_loss.compute_loss(y_true, y_pred)) + 10.215902) < 1e-5
        assert abs(sess.run(no_coord_loss.compute_loss(y_true, y_pred))) < 1e-5
def test_loss_nonzero_wh():
    yolo_loss = YOLOv3Loss(10, 1000, 10, 1.)
    matched_pred = tf.constant([[[100.0, 130.0, 2.0, 2.5, 3.0, 3.5, 1e10,
                                  -5.0, 0.3, 1.5, 1e99, 1e99, -1e99, -1e99]]])
    y_true = convert_pred_to_true_match(matched_pred)
    # Perturb pred_w and pred_h only.
    y_pred = tf.constant([[[100.0, 130.0, 2.0, 2.5, 3.0, 3.5, 1e10,
                            -5.0, 0.9, 1.8, 1e99, 1e99, -1e99, -1e99]]])
    # With a zero coord weight, the loss should vanish again.
    no_coord_loss = YOLOv3Loss(0, 1000, 10, 1.)
    with tf.Session() as sess:
        assert abs(sess.run(yolo_loss.compute_loss(y_true, y_pred)) + 63.55852) < 1e-5
        assert abs(sess.run(no_coord_loss.compute_loss(y_true, y_pred))) < 1e-5
def test_loss_nonzero_obj():
    yolo_loss = YOLOv3Loss(10, 1000, 10, 1.)
    matched_pred = tf.constant([[[100.0, 130.0, 2.0, 2.5, 3.0, 3.5, 1e10,
                                  -5.0, 0.3, 1.5, 1e99, 1e99, -1e99, -1e99]]])
    y_true = convert_pred_to_true_match(matched_pred)
    # Perturb the objectness prediction only.
    y_pred = tf.constant([[[100.0, 130.0, 2.0, 2.5, 3.0, 3.5, 1e10,
                            -5.0, 0.3, 1.5, -1e99, 1e99, -1e99, -1e99]]])
    with tf.Session() as sess:
        assert abs(sess.run(yolo_loss.compute_loss(y_true, y_pred)) - 41.446533) < 1e-5
def test_loss_nonzero_cls():
    yolo_loss = YOLOv3Loss(10, 1000, 10, 1.)
    matched_pred = tf.constant([[[100.0, 130.0, 2.0, 2.5, 3.0, 3.5, 1e10,
                                  -5.0, 0.3, 1.5, 1e99, 1e99, -1e99, -1e99]]])
    y_true = convert_pred_to_true_match(matched_pred)
    # Perturb the class predictions only.
    y_pred = tf.constant([[[100.0, 130.0, 2.0, 2.5, 3.0, 3.5, 1e10,
                            -5.0, 0.3, 1.5, 1e99, -1e99, 1e99, -1e99]]])
    # With a zero cls weight, the loss should vanish again.
    no_cls_loss = YOLOv3Loss(10, 1000, 0, 1.)
    with tf.Session() as sess:
        assert abs(sess.run(yolo_loss.compute_loss(y_true, y_pred)) - 828.9306640625) < 1e-5
        assert abs(sess.run(no_cls_loss.compute_loss(y_true, y_pred))) < 1e-5
def test_loss_nonzero_noobj():
    yolo_loss = YOLOv3Loss(10, 1, 10, 1.)
    # Base prediction has objectness -1e99, i.e. the GT built from it has no
    # positive boxes at all.
    matched_pred = tf.constant([[[100.0, 130.0, 2.0, 2.5, 3.0, 3.5, 1e10,
                                  -5.0, 0.3, 1.5, -1e99, 1e99, -1e99, -1e99]]])
    y_true = convert_pred_to_true_match(matched_pred)
    # Perturb objectness: a confident false positive.
    y_pred = tf.constant([[[100.0, 130.0, 2.0, 2.5, 3.0, 3.5, 1e10,
                            -5.0, 0.3, 1.5, 1e99, 1e99, -1e99, -1e99]]])
    # With a zero no-obj weight, the loss should vanish again.
    no_neg_loss = YOLOv3Loss(10, 0, 10, 1.)
    # Perturb everything except the (negative) objectness: still zero loss,
    # since only the negative objectness term applies to unmatched boxes.
    y_pred_other = tf.constant([[[100.0, 130.0, 2.0, 2.5, 3.0, 3.5, 1,
                                  5e10, 1.5, 0.1, -1e99, 0, 0, 0]]])
    with tf.Session() as sess:
        assert abs(sess.run(yolo_loss.compute_loss(y_true, y_pred)) - 41.446533) < 1e-5
        assert abs(sess.run(no_neg_loss.compute_loss(y_true, y_pred))) < 1e-5
        assert abs(sess.run(no_neg_loss.compute_loss(y_true, y_pred_other))) < 1e-5
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/losses/tests/test_loss.py |
"""YOLO entry point."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from maglev_sdk.docker_container.entrypoint import main
if __name__ == '__main__':
main('yolo_v3', 'nvidia_tao_tf1/cv/yolo_v3/scripts')
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/docker/yolo_v3.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/dataio/__init__.py |
|
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
'''Numpy implementation of YOLOv3 label encoder.'''
import numpy as np
import tensorflow as tf
def iou(boxes1, boxes2):
    '''
    Vectorized pairwise IoU in numpy.

    With `m` boxes in `boxes1` and `n` boxes in `boxes2`, produces the full
    `(m, n)` matrix of pairwise intersection-over-union scores via broadcasting.

    Arguments:
        boxes1 (array of shape (m, 4)): x_min, y_min, x_max, y_max
        boxes2 (array of shape (n, 4)): x_min, y_min, x_max, y_max

    Returns:
        IOU (array of shape (m, n)): IOU score
    '''
    # Broadcast corner coordinates over all (m, n) box pairs.
    top_left = np.maximum(boxes1[:, None, :2], boxes2[None, :, :2])
    bottom_right = np.minimum(boxes1[:, None, 2:], boxes2[None, :, 2:])
    # Clamp negative extents (disjoint boxes) to zero overlap.
    extents = np.maximum(bottom_right - top_left, 0)
    inter = extents[..., 0] * extents[..., 1]
    areas1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
    areas2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
    union = areas1[:, None] + areas2[None, :] - inter
    return inter / union
class YOLOv3InputEncoder:
    '''
    Encoder class.

    Transforms ground truth labels for object detection in images
    (2D bounding box coordinates and class labels) to the format required for
    training an YOLO v3 model.

    In the process of encoding the ground truth labels, a template of anchor boxes
    is being built, which are subsequently matched to the ground truth boxes
    via an intersection-over-union criterion (each GT is assigned to its
    best-IoU anchor).

    args:
        n_classes: Number of all possible classes.
        feature_map_stride: List of length n and format [(h_stride, w_stride), ...], n is number of
            feature maps. Stride is `input_size / fmap_size` and must be integer. The original paper
            input image is (416, 416) and feature map size is [(13, 13), (26, 26), (52, 52)]. The
            corresponding feature_map_stride should be [(32, 32), (16, 16), (8, 8)]
        anchors: List of 3 elements indicating the anchor boxes shape on feature maps. first element
            is for smallest feature map (i.e. to detect large objects). Last element is for largest
            feature map (i.e. to detect small objects). Each element is a list of tuples of size 2,
            in the format of (w, h). The length of the list can be any integer larger than 0. All
            w and h needs to be in range (0, 1) and this is (anchor_w / img_w, anchor_h / img_h)
    '''

    def __init__(self,  # pylint: disable=W0102
                 n_classes,
                 feature_map_stride=[(32, 32), (16, 16), (8, 8)],
                 anchors=[[(0.279, 0.216), (0.375, 0.476), (0.897, 0.784)],
                          [(0.072, 0.147), (0.149, 0.108), (0.142, 0.286)],
                          [(0.024, 0.031), (0.038, 0.072), (0.079, 0.055)]]):
        '''See class documentation for details.'''
        assert len(feature_map_stride) == len(anchors), "anchors and feature maps mismatch!"
        self.n_classes = n_classes
        self.fmap_num_anchors = [len(x) for x in anchors]
        self.feature_map_stride = feature_map_stride
        # maps box to fmap and corresponding box ind. {box_ind => (fmap, box_ind inside fmap)}
        self.fmap_box_dict = {}
        box_cnt = 0
        for idx, i in enumerate(anchors):
            for j, _ in enumerate(i):
                self.fmap_box_dict[box_cnt] = (idx, j)
                box_cnt += 1
        # w, h shape (9, 1)
        w, h = np.split(np.array(anchors).reshape(-1, 2), 2, axis=-1)
        # anchor shapes centered at the origin, in (ymin, xmin, ymax, xmax) order
        self.anchor_boxes = np.concatenate([-h / 2.0, -w / 2.0, h / 2.0, w / 2.0], axis=-1)
        # identity matrix used as a one-hot class-encoding lookup table
        self.one_hot_cls = np.eye(n_classes)

    def _loc_map_fn(self, output_img_size, cx, cy, anchor_idx):
        """Helper function to get location of anchor match.

        Args:
            output_img_size: (w, h) of the network input.
            cx, cy: normalized GT box center.
            anchor_idx: flat anchor index across all feature maps.

        Returns:
            fmap_id, fmap_y, fmap_x, anchor_id_in_fmap
        """
        fmap_idx, anchor_idx_fmap = self.fmap_box_dict[anchor_idx]
        # Normalized size of one feature-map cell.
        w_step = self.feature_map_stride[fmap_idx][1] / float(output_img_size[0])
        h_step = self.feature_map_stride[fmap_idx][0] / float(output_img_size[1])
        # Clamp into the grid so boxes on the image border stay in the last cell.
        fmap_x = int(np.floor(np.clip(cx / w_step, 0, 1.0 / w_step - 1e-3)))
        fmap_y = int(np.floor(np.clip(cy / h_step, 0, 1.0 / h_step - 1e-3)))
        return fmap_idx, fmap_y, fmap_x, anchor_idx_fmap

    def __call__(self, output_img_size, gt_label):
        '''
        Processing one image groundtruthing.

        Args:
            output_img_size: (w, h) 2 integers representing image size
            gt_label: (#boxes, [class_idx, is_difficult, x_min, y_min, x_max, y_max])

        Returns:
            encoded_target: `(#anchor_boxes, [cy, cx, h, w, objectness, cls])`
        '''
        encoding_template = []
        for fmap_id, num_anchor in enumerate(self.fmap_num_anchors):
            # One zero-filled template per feature map: (fmap_h, fmap_w, #anchors, 5 + n_classes).
            # BUGFIX: use np.float64 (same dtype as the old `np.float` alias,
            # which was removed in NumPy 1.24).
            template = np.zeros((output_img_size[1] // self.feature_map_stride[fmap_id][0],
                                 output_img_size[0] // self.feature_map_stride[fmap_id][1],
                                 num_anchor,
                                 5 + self.n_classes), dtype=np.float64)
            encoding_template.append(template)
        # all shape (#boxes, 1)
        cls_id, _, xmin, ymin, xmax, ymax = np.split(gt_label, 6, axis=-1)
        cy = (ymin + ymax) / 2.0
        cx = (xmin + xmax) / 2.0
        h = ymax - ymin
        w = xmax - xmin
        # GT shapes centered at the origin so IoU compares shapes only.
        gt_shape = np.concatenate([-h / 2.0, -w / 2.0, h / 2.0, w / 2.0], axis=-1)
        ious = iou(gt_shape, self.anchor_boxes)
        for gt_idx, gt_iou in enumerate(ious):
            gt_cx = cx[gt_idx, 0]
            gt_cy = cy[gt_idx, 0]
            gt_h = h[gt_idx, 0]
            gt_w = w[gt_idx, 0]
            # Assign the GT to its best-IoU anchor slot.
            loc = self._loc_map_fn(output_img_size, gt_cx, gt_cy, np.argmax(gt_iou))
            gt_array = np.concatenate(([gt_cy, gt_cx, gt_h, gt_w, 1.0],
                                       self.one_hot_cls[int(round(cls_id[gt_idx, 0]))]))
            encoding_template[loc[0]][loc[1], loc[2], loc[3], :] = gt_array
        # Flatten all feature-map templates into one (#anchor_boxes, 5 + n_classes) array.
        encoding_template = [x.reshape(-1, 5 + self.n_classes) for x in encoding_template]
        return np.concatenate(encoding_template, axis=0)
class YOLOv3InputEncoderTensor:
    '''
    Encoder class.

    Transforms ground truth labels for object detection in images
    (2D bounding box coordinates and class labels) to the format required for
    training an YOLO v3 model.

    In the process of encoding the ground truth labels, a template of anchor boxes
    is being built, which are subsequently matched to the ground truth boxes
    via an intersection-over-union criterion (each GT goes to its best-IoU anchor).

    args:
        img_height: image height (how many pixels in height)
        img_width: image width (how many pixels in width)
        n_classes: Number of all possible classes.
        feature_map_size: List of length n and format [(h,w), ...], n is number of feature maps and
            (h, w) is the last two dims of NCHW feature maps' shape
        anchors: List of 3 elements indicating the anchor boxes shape on feature maps. first element
            is for smallest feature map (i.e. to detect large objects). Last element is for largest
            feature map (i.e. to detect small objects). Each element is a list of tuples of size 2,
            in the format of (w, h). The length of the list can be any integer larger than 0.
    '''

    def __init__(self,  # pylint: disable=W0102
                 img_height,
                 img_width,
                 n_classes,
                 feature_map_size=None,
                 anchors=None):
        '''See class documentation for details.'''
        self.n_classes = n_classes
        self.fmap_size = tf.convert_to_tensor(feature_map_size)
        self.image_size = tf.convert_to_tensor([[img_height, img_width]], dtype=tf.float32)
        # Pixel size of one feature-map cell, per feature map: (h_stride, w_stride).
        self.fmap_size_ratio = tf.truediv(
            self.image_size,
            tf.cast(self.fmap_size, dtype=tf.float32)
        )
        # Index of each anchor inside its own feature map, e.g. [0, 1, 2, 0, 1, 2, ...].
        self.box_inds_in_fmap = tf.constant(sum([[j for j in range(len(anchors[i]))]
                                                 for i in range(len(anchors))], []), dtype=tf.int32)
        self.fmap_anchor_count = tf.constant([len(i) for i in anchors], dtype=tf.int32)
        # compute cumsum of box_idx_offset: flat index where each feature map's boxes start.
        self.box_idx_offset = tf.constant([0], dtype=tf.int32)
        box_total = tf.constant(0, dtype=tf.int32)
        for i in range(len(feature_map_size)):
            box_per_fmap = feature_map_size[i][0] * feature_map_size[i][1] * len(anchors[i])
            _sum = self.box_idx_offset[-1:] + box_per_fmap
            box_total = box_total + box_per_fmap
            self.box_idx_offset = tf.concat([self.box_idx_offset, _sum], axis=-1)
        # Drop the trailing total so offsets align one-to-one with feature maps.
        self.box_idx_offset = self.box_idx_offset[:-1]
        # One row per anchor position over all feature maps: [cy, cx, h, w, obj, one-hot cls].
        self.encoding_template = tf.zeros([box_total, 5 + n_classes], tf.float32)
        anchors = np.array(anchors)
        # Feature-map id of each flat anchor index, e.g. [0, 0, 0, 1, 1, 1, ...].
        self.anchor_fmap_mapping = tf.constant(sum([[i] * len(anchors[i])
                                                    for i in range(len(anchors))], []),
                                               dtype=tf.int32)
        anchors = anchors.reshape(-1, 2)
        w = anchors[:, 0]
        h = anchors[:, 1]
        # Anchor shapes centered at the origin, in (ymin, xmin, ymax, xmax) order.
        self.anchor_boxes = tf.constant(np.stack([-h / 2.0, -w / 2.0, h / 2.0, w / 2.0], axis=1),
                                        dtype=tf.float32)

    def __call__(self, ground_truth_labels):
        '''
        Converts ground truth bounding box data into a suitable format to train a YOLO v3 model.

        Arguments:
            ground_truth_labels (list): A python list of length `batch_size` that contains one 2D
                Numpy array for each batch image. Each such array has `k` rows for the `k` ground
                truth bounding boxes belonging to the respective image, and the data for each ground
                truth bounding box has the format `(class_id, xmin, ymin, xmax, ymax)` (i.e. the
                'corners' coordinate format), and `class_id` must be an integer greater than 0 for
                all boxes as class ID 0 is reserved for the background class.

        Returns:
            `y_encoded`, a 3D tensor of shape
            `(batch_size, #boxes, [cy, cx, h, w, objectness, objectness_negative, cls])` that
            serves as the ground truth label tensor for training, where `#boxes` is the total number
            of boxes predicted by the model per image. cx, cy, h, w are centroid coord. cls is
            one-hot class encoding. objectness = 1 if matched to GT, else 0. objectness_negative = 1
            if not matched to GT and not neutral, else 0.
        '''
        encoded = []
        for gt_label in ground_truth_labels:  # For each batch item...
            # Images with no GT boxes map straight to the all-zero template.
            match_y = tf.cond(tf.equal(tf.shape(gt_label)[0], 0),
                              lambda: self.encoding_template,
                              lambda label=gt_label: self.__process_one_img(label), strict=True)
            encoded.append(match_y)
        return tf.stack(encoded, axis=0)

    def __process_one_img(self, gt_label):
        '''
        TF graph for processing one image groundtruthing.

        Args:
            gt_label: 2D Numpy array for this image with `k` rows for the `k` ground
                truth bounding boxes belonging to the image, and the data for each ground
                truth bounding box has the format `(class_id, xmin, ymin, xmax, ymax)` (i.e. the
                'corners' coordinate format), and `class_id` must be an integer greater than 0 for
                all boxes as class ID 0 is reserved for the background class.

        Returns:
            encoded_target: `(#anchor_boxes, [cy, cx, h, w, objectness, objectness_negative, cls])`
        '''
        # nightmare level TF graph build.
        # Commented-out code is for single box match. which is easier to understand.
        # Real code matches all possible boxes with given GT togather.
        gt_label = tf.cast(gt_label, tf.float32)
        classes_one_hot = tf.one_hot(tf.reshape(tf.cast(gt_label[:, 0], tf.int32), [-1]),
                                     self.n_classes)
        # NOTE(review): coordinates are read from columns 2-5, implying an extra
        # column (presumably an is_difficult flag) at index 1 — the 5-column
        # format in the docstring looks stale; confirm against the caller.
        cy = tf.truediv(gt_label[:, 3:4] + gt_label[:, 5:6], 2.0)
        cx = tf.truediv(gt_label[:, 2:3] + gt_label[:, 4:5], 2.0)
        h = gt_label[:, 5:6] - gt_label[:, 3:4]
        w = gt_label[:, 4:5] - gt_label[:, 2:3]
        objectness = tf.ones_like(w)
        # gt encoded as [:, 4+n_cls]
        one_hot_gt = tf.concat([cy, cx, h, w, objectness, classes_one_hot], axis=-1)
        # force center to (0, 0) so IoU against the anchors compares shapes only
        gt_centroid_0 = tf.concat([tf.truediv(-h, 2.0), tf.truediv(-w, 2.0), tf.truediv(h, 2.0),
                                   tf.truediv(w, 2.0)], axis=-1)
        num_gt = tf.shape(gt_centroid_0)[0]
        iou_result = iou_tf(gt_centroid_0, self.anchor_boxes)
        # iou_match = tf.reshape(tf.argmax(iou_result, axis=-1, output_type=tf.int32), [-1])

        # size (#gt_box, #anchors)
        fmap_match_all = tf.tile(tf.reshape(self.anchor_fmap_mapping, [1, -1]), [num_gt, 1])
        # fmap_match = tf.gather(self.anchor_fmap_mapping, iou_match)
        # fmap_size_ratio = tf.gather(self.fmap_size_ratio, fmap_match, axis=0)

        # size (#gt_box, #anchors, 2)
        fmap_ratio_all = tf.gather(self.fmap_size_ratio, fmap_match_all, axis=0)
        # fmap_size = tf.gather(self.fmap_size, fmap_match, axis=0)

        # size (#gt_box, #anchors, 2)
        fmap_size_all = tf.gather(self.fmap_size, fmap_match_all, axis=0)
        # fmap_shift = tf.gather(self.box_idx_offset, fmap_match)

        # size (#gt_box, #anchors)
        fmap_shift_all = tf.gather(self.box_idx_offset, fmap_match_all)
        # anchor_count = tf.gather(self.fmap_anchor_count, fmap_match)
        anchor_count_all = tf.gather(self.fmap_anchor_count, fmap_match_all)
        cycx = one_hot_gt[..., :2]
        # adjusted_box_center = tf.truediv(cycx, fmap_size_ratio)

        # size (#gt, #anchor, 2): box center in feature-map cell units
        adjusted_box_center_all = tf.truediv(
            tf.reshape(cycx * self.image_size, [-1, 1, 2]),
            fmap_ratio_all
        )
        # box_center_cell = tf.cast(tf.maximum(tf.floor(adjusted_box_center - 1e-5), 0.0), tf.int32)
        box_center_limit = tf.truediv(
            tf.reshape(tf.ones_like(cycx) * self.image_size, [-1, 1, 2]),
            fmap_ratio_all
        )
        # Clamp into the grid so boxes on the image border stay in the last cell.
        box_center_cell_all = tf.cast(
            tf.floor(tf.maximum(tf.minimum(adjusted_box_center_all, box_center_limit - 1e-3), 0.)),
            tf.int32
        )
        # cell_shift = (box_center_cell[..., 0] * fmap_size[..., 1] + box_center_cell[..., 1])
        # * anchor_count
        cell_shift_all = (box_center_cell_all[..., 0] * fmap_size_all[..., 1] +
                          box_center_cell_all[..., 1]) * anchor_count_all
        # anchor_shift = tf.gather(self.box_inds_in_fmap, iou_match)
        anchor_shift_all = tf.tile(tf.reshape(self.box_inds_in_fmap, [1, -1]), [num_gt, 1])
        # box_ind = fmap_shift + cell_shift + anchor_shift, (#gt, #anchors)
        box_ind_all = fmap_shift_all + cell_shift_all + anchor_shift_all
        # For each GT, pick the flat template row of its best-IoU anchor.
        iou_match = tf.reshape(tf.argmax(iou_result, axis=-1, output_type=tf.int32), [-1, 1])
        best_match_box = tf.gather_nd(
            box_ind_all,
            tf.concat([tf.reshape(tf.range(num_gt), [-1, 1]), iou_match], axis=-1)
        )
        # Write each GT row into the zero template at its matched anchor slot.
        encoded = tensor_slice_replace(
            self.encoding_template,
            one_hot_gt,
            best_match_box,
            tf.range(num_gt)
        )
        return encoded
def area(boxlist, scope=None):
    """Compute the area of every box in a box list.

    Args:
        boxlist (tensor): Tensor of shape [N,4], holding xmin, ymin, xmax, ymax
        scope: name scope.

    Returns:
        a tensor with shape [N] representing box areas.
    """
    with tf.name_scope(scope, 'Area'):
        widths = boxlist[:, 2] - boxlist[:, 0]
        heights = boxlist[:, 3] - boxlist[:, 1]
        return widths * heights
def intersection(boxlist1, boxlist2, scope=None):
    """Compute pairwise intersection areas between two box collections.

    Args:
        boxlist1 (tensor): Tensor of shape [N,4], holding xmin, ymin, xmax, ymax
        boxlist2 (tensor): Tensor of shape [M,4], holding xmin, ymin, xmax, ymax
        scope: name scope.

    Returns:
        a tensor with shape [N, M] representing pairwise intersections
    """
    with tf.name_scope(scope, 'Intersection'):
        # Keep the second axis ([N, 1] / [M, 1]) so transposing broadcasts to [N, M].
        xmin1, ymin1 = boxlist1[:, 0:1], boxlist1[:, 1:2]
        xmax1, ymax1 = boxlist1[:, 2:3], boxlist1[:, 3:4]
        xmin2, ymin2 = boxlist2[:, 0:1], boxlist2[:, 1:2]
        xmax2, ymax2 = boxlist2[:, 2:3], boxlist2[:, 3:4]
        # Negative extents mean disjoint boxes; clamp them to zero.
        overlap_h = tf.maximum(
            0.0, tf.minimum(ymax1, tf.transpose(ymax2)) - tf.maximum(ymin1, tf.transpose(ymin2)))
        overlap_w = tf.maximum(
            0.0, tf.minimum(xmax1, tf.transpose(xmax2)) - tf.maximum(xmin1, tf.transpose(xmin2)))
        return overlap_h * overlap_w
def iou_tf(boxlist1, boxlist2, scope=None):
    """Computes pairwise intersection-over-union between box collections.

    Args:
        boxlist1 (tensor): Tensor of shape [N,4], holding xmin, ymin, xmax, ymax
        boxlist2 (tensor): Tensor of shape [M,4], holding xmin, ymin, xmax, ymax
        scope: name scope.

    Returns:
        a tensor with shape [N, M] representing pairwise iou scores.
    """
    with tf.name_scope(scope, 'IOU'):
        overlaps = intersection(boxlist1, boxlist2)
        unions = (tf.expand_dims(area(boxlist1), 1) +
                  tf.expand_dims(area(boxlist2), 0) - overlaps)
        # Zero-intersection pairs are forced to IoU 0 (also avoids 0/0).
        return tf.where(
            tf.equal(overlaps, 0.0),
            tf.zeros_like(overlaps), tf.truediv(overlaps, unions))
def tensor_slice_replace(a, b, a_idx, b_idx, scope=None):
    '''
    Returns a new tensor same as `a` but with `a[a_idx] = b[b_idx]`.

    Args:
        a, b (tensor): `a` and `b` must have same shape except for
            the first dimension.
        a_idx, b_idx (tensor): 1D tensors. `a_idx` and `b_idx` must
            have the same shape and all elements in `a_idx` should
            be smaller than `a.shape[0]`. Similar for `b_idx`

    Returns:
        c (tensor): A tensor same as `a` but with `a_idx` replaced
            by `b[b_idx]`.
    '''
    with tf.name_scope(scope, 'SliceReplace'):
        # Rows of `a` that are NOT being replaced.
        _, keep_idx = tf.setdiff1d(tf.range(tf.shape(a)[0]), a_idx)
        kept_rows = tf.gather(a, keep_idx)
        new_rows = tf.gather(b, b_idx)
        # Stitch kept and replacement rows back into their original positions.
        return tf.dynamic_stitch([keep_idx, a_idx], [kept_rows, new_rows])
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/dataio/input_encoder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TLT YOLOv3 data sequence."""
import cv2
import numpy as np
from nvidia_tao_tf1.cv.common.dataio.augmentation_lib import (
aug_flip,
aug_hsv,
aug_jitter,
aug_letterbox_resize
)
from nvidia_tao_tf1.cv.common.dataio.detection_data_sequence import DetectionDataSequence
class YOLOv3DataSequence(DetectionDataSequence):
    """YOLOv3 data sequence.

    Keras-style sequence that loads image/label batches for YOLO v3, applies
    augmentation during training, and optionally randomizes the network input
    shape every few batches (multi-scale training).
    """

    def __init__(self, *args, **kwargs):
        """Init function.

        All arguments are forwarded to DetectionDataSequence; additionally sets up
        multi-scale training shapes and validates the configured image bit depth.
        """
        super().__init__(*args, **kwargs)
        # randomize input shape
        if self.augmentation_config.randomize_input_shape_period > 0 and self.is_training:
            self._gen_random_shape()
        else:
            self.w_rand = None
            self.h_rand = None
        # Defaults to 8-bit image when output_depth is unset (0 in the config).
        self.image_depth = int(self.augmentation_config.output_depth) or 8
        if self.image_depth not in [8, 16]:
            raise ValueError(
                f"Only 8-bit and 16-bit images are supported, got {self.image_depth}-bit image"
            )

    def _gen_random_shape(self):
        """Generate random input shapes for all data points.

        Shapes are multiples of 32 in roughly [0.6x, 1.5x] of the configured output
        size; each sampled shape is held for `randomize_input_shape_period`
        consecutive batches.
        """
        w = self.augmentation_config.output_width / 32.0
        h = self.augmentation_config.output_height / 32.0
        assert self.augmentation_config.randomize_input_shape_period > 0, "Incorrect rand period!"
        # +1 to make sure we have enough random shape
        rand_len = len(self) // self.augmentation_config.randomize_input_shape_period + 1
        self.w_rand = np.random.randint(low=int(round(0.6*w)),
                                        high=int(round(1.5*w + 1.0)),
                                        size=rand_len) * 32
        self.h_rand = np.random.randint(low=int(round(0.6*h)),
                                        high=int(round(1.5*h + 1.0)),
                                        size=rand_len) * 32
        self.w_rand = np.repeat(self.w_rand, self.augmentation_config.randomize_input_shape_period)
        self.h_rand = np.repeat(self.h_rand, self.augmentation_config.randomize_input_shape_period)

    def _preprocessing(self, image, label, output_img_size):
        """Augment (training) or letterbox-resize (eval) one image and encode its labels.

        Args:
            image: decoded image array.
            label: per-image label array; the last 4 columns are the box coords.
            output_img_size: (w, h) target network input size.

        Returns:
            (image, label) after augmentation/resize and optional encoding.
        """
        bboxes = label[:, -4:]
        if self.is_training:
            # Build augmentation pipe.
            image = aug_hsv(image,
                            self.augmentation_config.hue,
                            self.augmentation_config.saturation,
                            self.augmentation_config.exposure,
                            depth=self.image_depth)
            if np.random.rand() < self.augmentation_config.vertical_flip:
                image, bboxes = aug_flip(image, bboxes, ftype=0)
            if np.random.rand() < self.augmentation_config.horizontal_flip:
                image, bboxes = aug_flip(image, bboxes, ftype=1)
            image, bboxes = aug_jitter(image, bboxes,
                                       jitter=self.augmentation_config.jitter,
                                       resize_ar=float(output_img_size[0]) / output_img_size[1])
            # BUGFIX: interpolation must be passed by keyword — the third
            # positional argument of cv2.resize is `dst`, not `interpolation`.
            image = cv2.resize(image, output_img_size, interpolation=cv2.INTER_LINEAR)
        else:
            image, bboxes = aug_letterbox_resize(image, bboxes, resize_shape=output_img_size)

        # Finalize
        label[:, -4:] = bboxes
        label = self._filter_invalid_labels(label)
        if self.encode_fn is not None:
            label = self.encode_fn(output_img_size, label)
        return image, label

    def __getitem__(self, batch_idx):
        """Load a full batch.

        Returns (images, labels), or (images, (labels, raw_labels)) when raw
        labels were requested.
        """
        images = []
        labels = []
        raw_labels = []
        # Multi-scale training: pick this batch's randomized input shape if enabled.
        if self.w_rand is not None:
            output_img_size = (self.w_rand[batch_idx], self.h_rand[batch_idx])
        else:
            output_img_size = self.output_img_size
        for idx in range(batch_idx * self.batch_size,
                         min(self.n_samples, (batch_idx + 1) * self.batch_size)):
            if self.output_raw_label:
                image, label, raw_label = self._get_single_item(idx, output_img_size)
            else:
                image, label = self._get_single_item(idx, output_img_size)
            images.append(image)
            labels.append(label)
            if self.output_raw_label:
                raw_labels.append(raw_label)
        image_batch, label_batch = self._batch_post_processing(images, labels)
        if self.output_raw_label:
            return image_batch, (label_batch, raw_labels)
        return image_batch, label_batch

    def on_epoch_end(self):
        """Shuffle data and resample multi-scale shapes at the end of each epoch."""
        if self.is_training:
            np.random.shuffle(self.data_inds)
            if self.augmentation_config.randomize_input_shape_period > 0:
                self._gen_random_shape()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/dataio/data_sequence.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA YOLO Decode Layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import keras.backend as K
from keras.engine.topology import Layer
import tensorflow as tf
class YOLODecodeLayer(Layer):
    '''Decodes model output to corner-formatted boxes.'''

    def call(self, x):
        '''
        Decode raw network output into corner boxes with per-class confidences.

        Args:
            x: 3-D tensor. Last dimension is
                (cy, cx, ph, pw, step_y, step_x, pred_y, pred_x, pred_h, pred_w, object, cls...)

        Returns:
            boxes: 3-D tensor. Last dimension is (x_min, y_min, x_max, y_max, cls_score)
        '''
        # !!! Keep explicit `:, :, :` slicing — replacing it with `...` breaks
        # the TensorRT export.
        objectness = tf.sigmoid(x[:, :, 10:11])
        # Per-class score = class probability * objectness, shape [..., num_cls].
        cls_score = tf.sigmoid(x[:, :, 11:]) * objectness
        # Box center: anchor cell origin plus sigmoid-squashed offset times cell step.
        center_y = x[:, :, 0:1] + tf.sigmoid(x[:, :, 6:7]) * x[:, :, 4:5]
        center_x = x[:, :, 1:2] + tf.sigmoid(x[:, :, 7:8]) * x[:, :, 5:6]
        # Box size: anchor prior scaled by exp of the raw prediction.
        box_h = x[:, :, 2:3] * tf.exp(x[:, :, 8:9])
        box_w = x[:, :, 3:4] * tf.exp(x[:, :, 9:10])
        half_w = 0.5 * box_w
        half_h = 0.5 * box_h
        # K.concatenate instead of tf.concat: tf.concat(axis=-1) can't be
        # processed correctly by the uff converter.
        return K.concatenate(
            [center_x - half_w,
             center_y - half_h,
             center_x + half_w,
             center_y + half_h,
             cls_score], -1)

    def compute_output_shape(self, input_shape):
        '''Layer output shape function.'''
        batch, n_boxes, n_features = input_shape
        # 11 prediction fields collapse into 4 corner coords => 7 fewer channels.
        return (batch, n_boxes, n_features - 7)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/layers/decode_layer.py |
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
"""IVA YOLO anchor box layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import keras.backend as K
from keras.engine.topology import InputSpec, Layer
import numpy as np
import tensorflow as tf
class YOLOAnchorBox(Layer):
    '''
    YOLOAnchorBox layer.
    This is a keras custom layer for YOLO v3/v4 AnchorBox. Dynamic / static
    input shape anchors are built in different ways so that:
    Dynamic: A lot of TF Ops. Slower
    Static: Only support fixed size. Faster and can be exported to TensorRT.
    Input shape:
        4D tensor of shape `(batch, channels, height, width)`
    Output shape:
        3D tensor of shape `(batch, n_boxes, 6)`. The last axis is
        (cy, cx, ph, pw, step_y, step_x).
    '''
    def __init__(self,
                 anchor_size=None,
                 **kwargs):
        '''
        init function.
        All arguments need to be set to the same values as in the box encoding process,
        otherwise the behavior is undefined. Some of these arguments are explained in
        more detail in the documentation of the `SSDBoxEncoder` class.
        Arguments:
            anchor_size: array of tuple of 2 ints [(width, height), (width, height), ...]
                size must be normalized (i.e. div by image size)
        '''
        self.anchor_size = anchor_size
        super(YOLOAnchorBox, self).__init__(**kwargs)
    def build(self, input_shape):
        """Layer build function."""
        self.input_spec = [InputSpec(shape=input_shape)]
        # Static-shape path: feature-map H/W are known at graph-build time, so the
        # full per-cell anchor grid can be precomputed in numpy and baked into the
        # graph as a single constant (faster, TensorRT-exportable).
        if (input_shape[2] is not None) and (input_shape[3] is not None):
            # anchor_size is (w, h); np_get_anchor_hw expects (h, w), hence the swap.
            anchors = np_get_anchor_hw((input_shape[2], input_shape[3]),
                                       [(i[1], i[0]) for i in self.anchor_size])
            # Build a 4D tensor so that TensorRT UFF parser can work correctly.
            anchors = anchors.reshape(1, 1, -1, 6)
            self.num_anchors = anchors.shape[2]
            self.anchors = K.constant(anchors, dtype='float32')
        else:
            # Dynamic-shape path: only the per-anchor (h, w) pairs can be
            # precomputed; grid offsets are generated with TF ops in call().
            self.num_anchors = None
            anchors = np.array([[i[1], i[0]] for i in self.anchor_size]).reshape(1, 1, -1, 2)
            self.anchors = K.constant(anchors, dtype='float32')
        # (feature_map, n_boxes, 6)
        super(YOLOAnchorBox, self).build(input_shape)
    def call(self, x):
        '''
        Return an anchor box tensor based on the shape of the input tensor.
        Note that this tensor does not participate in any graph computations at runtime.
        It is being created as a constant once during graph creation and is just being
        output along with the rest of the model output during runtime. Because of this,
        all logic is implemented as Numpy array operations and it is sufficient to convert
        the resulting Numpy array into a Keras tensor at the very end before outputting it.
        Arguments:
            x (tensor): 4D tensor of shape `(batch, channels, height, width)`.
                The input for this layer must be the output
                of the localization predictor layer.
        '''
        # Compute box width and height for each aspect ratio
        # The shorter side of the image will be used to compute `w` and `h`.
        if self.num_anchors is not None:
            # Static path: tile the precomputed (1, 1, n, 6) constant over the batch.
            anchor_dup = tf.identity(self.anchors)
            with tf.name_scope(None, 'FirstDimTile'):
                x_dup = tf.identity(x)
                anchors = K.tile(anchor_dup, (K.shape(x_dup)[0], 1, 1, 1))
            # this step is done for TRT export. The BatchDimTile supports 4-D input
            anchors = K.reshape(anchors, [-1, self.num_anchors, 6])
        else:
            # Dynamic path: derive the grid size from the input tensor at runtime.
            feature_w = tf.shape(x)[3]
            feature_h = tf.shape(x)[2]
            # Per-anchor (h, w) sizes repeated for every grid cell.
            anchors = tf.tile(self.anchors, [feature_h, feature_w, 1, 1])
            # Normalized top-left offsets of each grid cell in [0, 1).
            xx, yy = tf.meshgrid(tf.range(0.0, 1.0, 1.0 / tf.cast(feature_w, tf.float32)),
                                 tf.range(0.0, 1.0, 1.0 / tf.cast(feature_h, tf.float32)))
            xx = tf.reshape(xx, [feature_h, feature_w, 1, 1])
            yy = tf.reshape(yy, [feature_h, feature_w, 1, 1])
            xx = tf.tile(xx, [1, 1, len(self.anchor_size), 1])
            yy = tf.tile(yy, [1, 1, len(self.anchor_size), 1])
            # Per-cell step sizes (1/h, 1/w) broadcast to the grid shape.
            shape_template = tf.zeros_like(yy)
            y_step = shape_template + 1.0 / tf.cast(feature_h, tf.float32)
            x_step = shape_template + 1.0 / tf.cast(feature_w, tf.float32)
            # Last-axis layout matches the static path: (cy, cx, ph, pw, step_y, step_x).
            anchors = tf.concat([yy, xx, anchors, y_step, x_step], -1)
            anchors = tf.reshape(anchors, [1, -1, 6])
            anchors = K.tile(anchors, (K.shape(x)[0], 1, 1))
        return anchors
    def compute_output_shape(self, input_shape):
        '''Layer output shape function.'''
        batch_size = input_shape[0]
        # num_anchors is None when the input spatial dims are dynamic.
        return (batch_size, self.num_anchors, 6)
    def get_config(self):
        '''Layer get_config function.'''
        config = {
            'anchor_size': self.anchor_size,
        }
        base_config = super(YOLOAnchorBox, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
def np_get_anchor_hw(feature_map_size, anchor_size_hw):
    '''
    Get YOLO Anchors.
    Args:
        feature_map_size: tuple of 2 ints (height, width)
        anchor_size_hw: array of tuple of 2 ints [(height, width), (height, width), ...]
            sizes are normalized (i.e. divided by image size)
    Returns:
        anchor_results: array of shape (height, width, n_anchors, 6) whose last
            axis is (cy, cx, ph, pw, step_y, step_x)
    '''
    grid_h, grid_w = feature_map_size
    n_anchors = len(anchor_size_hw)
    anchor_results = np.zeros((grid_h, grid_w, n_anchors, 6))
    # Normalized top-left offsets of every grid cell, broadcast over anchors.
    col_offsets, row_offsets = np.meshgrid(np.arange(0, 1.0, 1.0 / grid_w),
                                           np.arange(0, 1.0, 1.0 / grid_h))
    anchor_results[..., 0] = row_offsets[..., np.newaxis]
    anchor_results[..., 1] = col_offsets[..., np.newaxis]
    # Per-anchor prior sizes, identical for every cell.
    for idx, (prior_h, prior_w) in enumerate(anchor_size_hw):
        anchor_results[:, :, idx, 2] = float(prior_h)
        anchor_results[:, :, idx, 3] = float(prior_w)
    # Cell step sizes are constant across the whole grid.
    anchor_results[..., 4] = 1.0 / grid_h
    anchor_results[..., 5] = 1.0 / grid_w
    return anchor_results
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/layers/yolo_anchor_box_layer.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/layers/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA YOLO NMS Layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.engine.topology import Layer
import tensorflow as tf
class NMSLayer(Layer):
    '''
    NMS layer to get final outputs from raw pred boxes.
    Args:
        output_size: how many boxes you want for final outputs (padded by zeros)
        iou_threshold: boxes with iou > threshold will be NMSed
        score_threshold: Remove boxes with confidence less than threshold before NMS.
        force_on_cpu: if True, pin the NMS op to CPU.
    '''
    def __init__(self,
                 output_size=200,
                 iou_threshold=0.5,
                 score_threshold=0.01,
                 force_on_cpu=False,
                 **kwargs):
        '''Init function.'''
        self.output_size = output_size
        self.iou_threshold = iou_threshold
        self.score_threshold = score_threshold
        self.force_on_cpu = force_on_cpu
        super(NMSLayer, self).__init__(**kwargs)
    def _batched_nms(self, x):
        '''Run combined NMS on decoded boxes.
        Args:
            x: 3-D tensor. Last dim is (x_min, y_min, x_max, y_max, cls_conf...).
        Returns:
            Tuple of (nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections)
            as produced by tf.image.combined_non_max_suppression.
        '''
        return tf.image.combined_non_max_suppression(
            tf.expand_dims(x[..., :4], axis=2),
            x[..., 4:],
            max_output_size_per_class=self.output_size,
            max_total_size=self.output_size,
            iou_threshold=self.iou_threshold,
            score_threshold=self.score_threshold,
            pad_per_class=False,
            clip_boxes=True,
            name='batched_nms'
        )
    def call(self, x):
        '''
        Perform NMS on output.
        Args:
            x: 3-D tensor. Last dimension is (x_min, y_min, x_max, y_max, cls_confidence[0, 1, ...])
        Returns:
            results: 3-D Tensor, [num_batch, output_size, 6].
                The last dim is (cls_inds, cls_score, xmin, ymin, xmax, ymax)
        '''
        # Same op either way; only the device placement differs.
        if self.force_on_cpu:
            with tf.device("cpu:0"):
                nmsed_box, nmsed_score, nmsed_class, _ = self._batched_nms(x)
        else:
            nmsed_box, nmsed_score, nmsed_class, _ = self._batched_nms(x)
        nmsed_score = tf.expand_dims(nmsed_score, axis=-1)
        nmsed_class = tf.expand_dims(nmsed_class, axis=-1)
        # Reorder to (class, score, box) to match downstream consumers.
        outputs = tf.concat([nmsed_class, nmsed_score, nmsed_box], axis=-1)
        return outputs
    def compute_output_shape(self, input_shape):
        '''Layer output shape function.'''
        return (input_shape[0], self.output_size, 6)
    def get_config(self):
        '''Layer get_config function.'''
        config = {
            'output_size': self.output_size,
            'iou_threshold': self.iou_threshold,
            'score_threshold': self.score_threshold,
            'force_on_cpu': self.force_on_cpu,
        }
        base_config = super(NMSLayer, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/layers/nms_layer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""helper layers for model export."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import keras.backend as K
from keras.layers import Layer
class BoxLayer(Layer):
    '''
    Helper layer to export model - Get box.
    Input:
        Encoded detection (last layer output of training model).
    Output:
        Boxes in corner format (x_min, y_min, x_max, y_max)
    '''
    def compute_output_shape(self, input_shape):
        '''Output is (batch, n_boxes, 1, 4).'''
        return (input_shape[0], input_shape[1], 1, 4)
    def call(self, x):
        '''Decode anchors + regressed offsets into corner-format boxes.'''
        in_shape = K.shape(x)
        x = K.reshape(x, [in_shape[0], in_shape[1], 1, in_shape[2]])
        # Center = anchor origin + sigmoid(offset) * grid step; size = prior * exp(delta).
        center_y = x[:, :, :, 0:1] + K.sigmoid(x[:, :, :, 6:7]) * x[:, :, :, 4:5]
        center_x = x[:, :, :, 1:2] + K.sigmoid(x[:, :, :, 7:8]) * x[:, :, :, 5:6]
        box_h = x[:, :, :, 2:3] * K.exp(x[:, :, :, 8:9])
        box_w = x[:, :, :, 3:4] * K.exp(x[:, :, :, 9:10])
        half_h = 0.5 * box_h
        half_w = 0.5 * box_w
        corners = K.concatenate([center_x - half_w,
                                 center_y - half_h,
                                 center_x + half_w,
                                 center_y + half_h], -1)
        return K.identity(corners, name="out_box")
class ClsLayer(Layer):
    '''
    Helper layer to export model - Get class score.
    Input:
        Encoded detection (last layer output of training model).
    Output:
        (Sigmoid) confidence scores for each class.
    '''
    def compute_output_shape(self, input_shape):
        '''Output drops the 11 box/objectness channels: (batch, n_boxes, n_cls, 1).'''
        return (input_shape[0], input_shape[1], input_shape[2]-11, 1)
    def call(self, x):
        '''Per-class score = sigmoid(class logit) * sigmoid(objectness logit).'''
        in_shape = K.shape(x)
        x = K.reshape(x, [in_shape[0], in_shape[1], in_shape[2], 1])
        objectness = K.sigmoid(x[:, :, 10:11, :])
        per_class = K.sigmoid(x[:, :, 11:, :])
        return K.identity(per_class * objectness, name="out_cls")
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/layers/export_layers.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test NMS."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Input
from keras.models import Model
import numpy as np
from nvidia_tao_tf1.cv.yolo_v3.layers.nms_layer import NMSLayer
def test_NMS_truncate():
    """NMS keeps only the top `output_size` boxes, ordered by score."""
    x = Input(shape=(5, 7))
    y = NMSLayer(output_size=3)(x)
    model = Model(inputs=x, outputs=y)
    # See sample details in test_input_encoder.
    # Plain array literal; eval() of a string fixture is unnecessary and unsafe.
    encoded_val = np.array([[[1., 2., 3., 4., 1., 0., 0.],
                             [35., 36., 37., 38., 0.5, 0., 0.],
                             [15., 16., 17., 18., 0.7, 0., 0.],
                             [25., 26., 27., 28., 0.6, 0., 0.],
                             [5., 6., 7., 8., 0.8, 0., 0.]]])
    # normalized
    encoded_val[..., :4] = encoded_val[..., :4] / 50.0
    expected = np.array([[[0., 1., 1., 2., 3., 4.],
                          [0., 0.8, 5., 6., 7., 8.],
                          [0., 0.7, 15., 16., 17., 18.]]])
    # normalize
    expected[..., -4:] = expected[..., -4:] / 50.0
    pred = model.predict(encoded_val)
    assert np.max(np.abs(pred - expected)) < 1e-5
def test_NMS_padding():
    """When fewer boxes survive than `output_size`, the output is zero-padded."""
    x = Input(shape=(5, 7))
    y = NMSLayer(output_size=10)(x)
    model = Model(inputs=x, outputs=y)
    # See sample details in test_input_encoder.
    # Plain array literal; eval() of a string fixture is unnecessary and unsafe.
    encoded_val = np.array([[[1., 2., 3., 4., 1., 0., 0.],
                             [35., 36., 37., 38., 0.5, 0., 0.],
                             [15., 16., 17., 18., 0.7, 0., 0.],
                             [25., 26., 27., 28., 0.6, 0., 0.],
                             [5., 6., 7., 8., 0.8, 0., 0.]]])
    # normalized
    encoded_val[..., :4] = encoded_val[..., :4] / 50.0
    expected = np.array([[[0., 1., 1., 2., 3., 4.],
                          [0., 0.8, 5., 6., 7., 8.],
                          [0., 0.7, 15., 16., 17., 18.],
                          [0., 0.6, 25., 26., 27., 28.],
                          [0., 0.5, 35., 36., 37., 38.],
                          [0., 0., 0., 0., 0., 0.],
                          [0., 0., 0., 0., 0., 0.],
                          [0., 0., 0., 0., 0., 0.],
                          [0., 0., 0., 0., 0., 0.],
                          [0., 0., 0., 0., 0., 0.]]])
    # normalize
    expected[..., -4:] = expected[..., -4:] / 50.0
    pred = model.predict(encoded_val)
    assert np.max(np.abs(pred - expected)) < 1e-5
def test_NMS_nms():
    """Overlapping boxes are suppressed; the lower-scored duplicate is dropped."""
    x = Input(shape=(5, 7))
    y = NMSLayer(output_size=10)(x)
    model = Model(inputs=x, outputs=y)
    # See sample details in test_input_encoder.
    # Plain array literal; eval() of a string fixture is unnecessary and unsafe.
    # Row 3 (5.3, 6.3, ...) heavily overlaps row 5 (5., 6., ...) and has a
    # lower score, so NMS must remove it.
    encoded_val = np.array([[[1., 2., 3., 4., 1., 0., 0.],
                             [35., 36., 37., 38., 0.5, 0., 0.],
                             [5.3, 6.3, 7., 8., 0.7, 0., 0.],
                             [25., 26., 27., 28., 0.6, 0., 0.],
                             [5., 6., 7., 8., 0.8, 0., 0.]]])
    # normalized
    encoded_val[..., :4] = encoded_val[..., :4] / 50.0
    expected = np.array([[[0., 1., 1., 2., 3., 4.],
                          [0., 0.8, 5., 6., 7., 8.],
                          [0., 0.6, 25., 26., 27., 28.],
                          [0., 0.5, 35., 36., 37., 38.],
                          [0., 0., 0., 0., 0., 0.],
                          [0., 0., 0., 0., 0., 0.],
                          [0., 0., 0., 0., 0., 0.],
                          [0., 0., 0., 0., 0., 0.],
                          [0., 0., 0., 0., 0., 0.],
                          [0., 0., 0., 0., 0., 0.]]])
    # normalize
    expected[..., -4:] = expected[..., -4:] / 50.0
    pred = model.predict(encoded_val)
    assert np.max(np.abs(pred - expected)) < 1e-5
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/layers/tests/test_nms_layer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test helper layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Input
from keras.models import Model
import numpy as np
from nvidia_tao_tf1.cv.yolo_v3.layers.export_layers import BoxLayer, ClsLayer
def test_box_layer():
    """BoxLayer decodes encoded predictions into corner-format boxes."""
    x = Input(shape=(10, 18))
    y = BoxLayer()(x)
    model = Model(inputs=x, outputs=y)
    # Plain array literal; eval() of a string fixture is unnecessary and unsafe.
    encoded_val = np.array([[[0.31853288, 0.43654425, -0.04237543, -0.52155923,
                              0.19736763, 0.08178696, 0.15963268, 0.21230579,
                              0.20757881, -0.52988177, 0.61076724, 0.12471242,
                              -0.11150562, -0.06977893, -0.43470218, 0.30996265,
                              0.37252738, 0.51286116],
                             [0.14094187, -0.27182029, 0.18511877, -0.07511589,
                              -0.07891718, 0.31212412, 0.16098262, 0.19797382,
                              -0.43625911, -0.62739477, 0.40618992, -0.58509829,
                              0.46435664, 0.30513983, -0.1154359, -0.12232048,
                              -0.35335439, -0.15514401],
                             [-0.4435788, 0.17339223, 0.49024131, -0.41337404,
                              -0.03950875, 0.09569453, 0.11478826, 0.30234933,
                              -0.07100774, 0.18649774, -0.03488029, 0.0932529,
                              -0.1600546, -0.08485666, -0.17145977, -0.36596332,
                              0.21935859, -0.4561344],
                             [0.26568643, 0.05245326, 0.43467644, -0.04510512,
                              -0.4880742, 0.13686102, -0.04520775, 0.27334498,
                              -0.10976604, 0.30152139, 0.42257043, 0.65097083,
                              0.65856431, 0.30920291, -0.10761981, -0.17804009,
                              0.02396226, -0.42966381],
                             [0.0754364, -0.43377111, 0.34619459, 0.20489158,
                              0.44529587, 0.26571338, -0.47253105, -0.43720297,
                              -0.13591284, -0.52512667, 0.14932724, 0.36775451,
                              -0.16522197, 0.45886091, -0.12075849, -0.53379658,
                              -0.41089267, 0.10501947],
                             [0.07269398, -0.25815662, -0.15854433, -0.1325061,
                              0.2008364, -0.12098036, -0.07968031, -0.16857242,
                              0.00185386, 0.15585919, 0.0737384, -0.00930042,
                              0.11230997, 0.42464644, 0.12645006, -0.80636047,
                              0.26897187, -0.06773979],
                             [-0.74415635, 0.08761501, 0.58244596, -0.43155333,
                              0.53962684, -0.07503792, -0.18369426, -0.13517962,
                              -0.13551022, -0.36913204, 0.03110164, 0.61730996,
                              -0.12686711, -0.30124402, -0.30547717, -0.22220013,
                              0.29756512, -0.184686],
                             [-0.33638997, -0.07932845, -0.55568364, 0.05962443,
                              0.64843452, 0.79589313, -0.01803575, -0.20797992,
                              -0.351547, -0.50232191, -0.40235586, -0.02057243,
                              -0.89491065, -0.1765394, -0.17597896, 0.09962589,
                              -0.37461121, -0.42049561],
                             [0.29857787, -0.14727424, 0.01760341, 0.30076768,
                              0.13391777, -0.2511477, -0.5511517, -0.32004931,
                              -0.1215235, 0.20353435, -0.07441485, 0.10444563,
                              -0.0649847, 0.02956609, 0.39488643, 0.13267954,
                              0.38612237, 0.02984453],
                             [0.56022737, -0.10532191, -0.125717, 0.09212133,
                              0.00289174, 0.27512265, 0.19977999, 0.32625175,
                              0.11545165, 0.2473364, 0.48727129, 0.22696133,
                              -0.29905336, 0.01784677, 0.44397951, 0.12411839,
                              0.24461395, 0.15557853]]])
    expected = np.array([[[[0.6352768, 0.4511522, 0.3282481, 0.399001]],
                          [[-0.08030513, 0.03847902, -0.1204156, 0.15814908]],
                          [[0.47748056, -0.6927845, -0.02064419, -0.23614697]],
                          [[0.16066766, -0.16758, 0.09968942, 0.22190909]],
                          [[-0.3900978, 0.09533787, -0.26890844, 0.39753762]],
                          [[-0.23613298, 0.24853289, -0.39098775, 0.08969437]],
                          [[0.20180187, -0.7533715, -0.09654567, -0.24473873]],
                          [[0.2593441, 0.18039277, 0.29542428, -0.2105855]],
                          [[-0.43725252, 0.33974332, -0.06859337, 0.35533237]],
                          [[-0.00450347, 0.6323684, 0.11346802, 0.49126607]]]])
    pred = model.predict(encoded_val)
    assert np.max(np.abs(pred - expected)) < 1e-3
def test_cls_layer():
    """ClsLayer multiplies sigmoid class scores by sigmoid objectness."""
    x = Input(shape=(10, 18))
    y = ClsLayer()(x)
    model = Model(inputs=x, outputs=y)
    # Plain array literal; eval() of a string fixture is unnecessary and unsafe.
    encoded_val = np.array([[[0.31853288, 0.43654425, -0.04237543, -0.52155923,
                              0.19736763, 0.08178696, 0.15963268, 0.21230579,
                              0.20757881, -0.52988177, 0.61076724, 0.12471242,
                              -0.11150562, -0.06977893, -0.43470218, 0.30996265,
                              0.37252738, 0.51286116],
                             [0.14094187, -0.27182029, 0.18511877, -0.07511589,
                              -0.07891718, 0.31212412, 0.16098262, 0.19797382,
                              -0.43625911, -0.62739477, 0.40618992, -0.58509829,
                              0.46435664, 0.30513983, -0.1154359, -0.12232048,
                              -0.35335439, -0.15514401],
                             [-0.4435788, 0.17339223, 0.49024131, -0.41337404,
                              -0.03950875, 0.09569453, 0.11478826, 0.30234933,
                              -0.07100774, 0.18649774, -0.03488029, 0.0932529,
                              -0.1600546, -0.08485666, -0.17145977, -0.36596332,
                              0.21935859, -0.4561344],
                             [0.26568643, 0.05245326, 0.43467644, -0.04510512,
                              -0.4880742, 0.13686102, -0.04520775, 0.27334498,
                              -0.10976604, 0.30152139, 0.42257043, 0.65097083,
                              0.65856431, 0.30920291, -0.10761981, -0.17804009,
                              0.02396226, -0.42966381],
                             [0.0754364, -0.43377111, 0.34619459, 0.20489158,
                              0.44529587, 0.26571338, -0.47253105, -0.43720297,
                              -0.13591284, -0.52512667, 0.14932724, 0.36775451,
                              -0.16522197, 0.45886091, -0.12075849, -0.53379658,
                              -0.41089267, 0.10501947],
                             [0.07269398, -0.25815662, -0.15854433, -0.1325061,
                              0.2008364, -0.12098036, -0.07968031, -0.16857242,
                              0.00185386, 0.15585919, 0.0737384, -0.00930042,
                              0.11230997, 0.42464644, 0.12645006, -0.80636047,
                              0.26897187, -0.06773979],
                             [-0.74415635, 0.08761501, 0.58244596, -0.43155333,
                              0.53962684, -0.07503792, -0.18369426, -0.13517962,
                              -0.13551022, -0.36913204, 0.03110164, 0.61730996,
                              -0.12686711, -0.30124402, -0.30547717, -0.22220013,
                              0.29756512, -0.184686],
                             [-0.33638997, -0.07932845, -0.55568364, 0.05962443,
                              0.64843452, 0.79589313, -0.01803575, -0.20797992,
                              -0.351547, -0.50232191, -0.40235586, -0.02057243,
                              -0.89491065, -0.1765394, -0.17597896, 0.09962589,
                              -0.37461121, -0.42049561],
                             [0.29857787, -0.14727424, 0.01760341, 0.30076768,
                              0.13391777, -0.2511477, -0.5511517, -0.32004931,
                              -0.1215235, 0.20353435, -0.07441485, 0.10444563,
                              -0.0649847, 0.02956609, 0.39488643, 0.13267954,
                              0.38612237, 0.02984453],
                             [0.56022737, -0.10532191, -0.125717, 0.09212133,
                              0.00289174, 0.27512265, 0.19977999, 0.32625175,
                              0.11545165, 0.2473364, 0.48727129, 0.22696133,
                              -0.29905336, 0.01784677, 0.44397951, 0.12411839,
                              0.24461395, 0.15557853]]])
    expected = np.array([[[0.3442388, 0.30600947, 0.31275627, 0.25471213, 0.37388256,
                           0.3837296, 0.4053815],
                          [0.21471846, 0.3685351, 0.34551924, 0.2827858, 0.28175643,
                           0.2476133, 0.2768552],
                          [0.25708547, 0.22602433, 0.23522456, 0.22463313, 0.20118774,
                           0.27247456, 0.19056945],
                          [0.39703092, 0.3980631, 0.34837776, 0.2858115, 0.27523145,
                           0.3056678, 0.23813948],
                          [0.3174772, 0.24648973, 0.32920435, 0.2524312, 0.19858934,
                           0.21420556, 0.28272408],
                          [0.25800774, 0.27375397, 0.3134377, 0.27558005, 0.16002087,
                           0.29386497, 0.25043696],
                          [0.32985377, 0.23780398, 0.21593297, 0.21540776, 0.22579597,
                           0.2913851, 0.23050907],
                          [0.19831222, 0.11625554, 0.18273215, 0.18278787, 0.21034616,
                           0.16327505, 0.15885517],
                          [0.25326118, 0.2328842, 0.24426049, 0.28761938, 0.2566472,
                           0.28660387, 0.244294],
                          [0.34473014, 0.26376066, 0.31249547, 0.37738135, 0.32892877,
                           0.34742627, 0.33377704]]])
    expected = expected.reshape(1, 10, 7, 1)
    pred = model.predict(encoded_val)
    assert np.max(np.abs(pred - expected)) < 1e-5
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/layers/tests/test_export_helper_layer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test NMS."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Input
from keras.models import Model
import numpy as np
from nvidia_tao_tf1.cv.yolo_v3.layers.yolo_anchor_box_layer import YOLOAnchorBox
def test_anchorbox_multibatch():
    """Anchor grid is identical for every batch item, static or dynamic shape."""
    x = Input(shape=(3, 2, 2))
    img_size = 416.0
    anchor_size = np.array([(116, 90), (156, 198), (373, 326)]) / img_size
    y = YOLOAnchorBox(anchor_size=anchor_size)(x)
    model = Model(inputs=x, outputs=y)
    # See sample details in test_input_encoder.
    # The layer output depends only on the input's shape, not its values.
    fmap = np.random.normal(size=(2, 3, 2, 2))
    # One (cy, cx, ph, pw) row per grid cell / anchor, in pixel units.
    # Plain array literal; eval() of a string fixture is unnecessary and unsafe.
    grid = np.array([[0., 0., 90., 116.],
                     [0., 0., 198., 156.],
                     [0., 0., 326., 373.],
                     [0., 208., 90., 116.],
                     [0., 208., 198., 156.],
                     [0., 208., 326., 373.],
                     [208., 0., 90., 116.],
                     [208., 0., 198., 156.],
                     [208., 0., 326., 373.],
                     [208., 208., 90., 116.],
                     [208., 208., 198., 156.],
                     [208., 208., 326., 373.]])
    # Same grid repeated for both batch items.
    expected = np.tile(grid, (2, 1, 1))
    # step_y and step_x are constant (208 px) across the 2x2 grid.
    expected = np.concatenate([expected, np.zeros((2, 12, 2)) + 208.0], axis=-1)
    pred = model.predict(fmap) * img_size
    assert np.max(np.abs(pred - expected)) < 1e-5
    # Dynamic-shape path must produce the same anchors.
    x = Input(shape=(3, None, None))
    y = YOLOAnchorBox(anchor_size=anchor_size)(x)
    model = Model(inputs=x, outputs=y)
    pred = model.predict(fmap) * img_size
    assert np.max(np.abs(pred - expected)) < 1e-5
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/layers/tests/test_anchorbox_layer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test decoder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Input
from keras.models import Model
import numpy as np
from nvidia_tao_tf1.cv.yolo_v3.layers.decode_layer import YOLODecodeLayer
def test_output_decoder():
    """YOLODecodeLayer emits (box corners, per-class scores) per prediction."""
    x = Input(shape=(10, 18))
    y = YOLODecodeLayer()(x)
    model = Model(inputs=x, outputs=y)
    # Plain array literal; eval() of a string fixture is unnecessary and unsafe.
    encoded_val = np.array([[[0.31853288, 0.43654425, -0.04237543, -0.52155923,
                              0.19736763, 0.08178696, 0.15963268, 0.21230579,
                              0.20757881, -0.52988177, 0.61076724, 0.12471242,
                              -0.11150562, -0.06977893, -0.43470218, 0.30996265,
                              0.37252738, 0.51286116],
                             [0.14094187, -0.27182029, 0.18511877, -0.07511589,
                              -0.07891718, 0.31212412, 0.16098262, 0.19797382,
                              -0.43625911, -0.62739477, 0.40618992, -0.58509829,
                              0.46435664, 0.30513983, -0.1154359, -0.12232048,
                              -0.35335439, -0.15514401],
                             [-0.4435788, 0.17339223, 0.49024131, -0.41337404,
                              -0.03950875, 0.09569453, 0.11478826, 0.30234933,
                              -0.07100774, 0.18649774, -0.03488029, 0.0932529,
                              -0.1600546, -0.08485666, -0.17145977, -0.36596332,
                              0.21935859, -0.4561344],
                             [0.26568643, 0.05245326, 0.43467644, -0.04510512,
                              -0.4880742, 0.13686102, -0.04520775, 0.27334498,
                              -0.10976604, 0.30152139, 0.42257043, 0.65097083,
                              0.65856431, 0.30920291, -0.10761981, -0.17804009,
                              0.02396226, -0.42966381],
                             [0.0754364, -0.43377111, 0.34619459, 0.20489158,
                              0.44529587, 0.26571338, -0.47253105, -0.43720297,
                              -0.13591284, -0.52512667, 0.14932724, 0.36775451,
                              -0.16522197, 0.45886091, -0.12075849, -0.53379658,
                              -0.41089267, 0.10501947],
                             [0.07269398, -0.25815662, -0.15854433, -0.1325061,
                              0.2008364, -0.12098036, -0.07968031, -0.16857242,
                              0.00185386, 0.15585919, 0.0737384, -0.00930042,
                              0.11230997, 0.42464644, 0.12645006, -0.80636047,
                              0.26897187, -0.06773979],
                             [-0.74415635, 0.08761501, 0.58244596, -0.43155333,
                              0.53962684, -0.07503792, -0.18369426, -0.13517962,
                              -0.13551022, -0.36913204, 0.03110164, 0.61730996,
                              -0.12686711, -0.30124402, -0.30547717, -0.22220013,
                              0.29756512, -0.184686],
                             [-0.33638997, -0.07932845, -0.55568364, 0.05962443,
                              0.64843452, 0.79589313, -0.01803575, -0.20797992,
                              -0.351547, -0.50232191, -0.40235586, -0.02057243,
                              -0.89491065, -0.1765394, -0.17597896, 0.09962589,
                              -0.37461121, -0.42049561],
                             [0.29857787, -0.14727424, 0.01760341, 0.30076768,
                              0.13391777, -0.2511477, -0.5511517, -0.32004931,
                              -0.1215235, 0.20353435, -0.07441485, 0.10444563,
                              -0.0649847, 0.02956609, 0.39488643, 0.13267954,
                              0.38612237, 0.02984453],
                             [0.56022737, -0.10532191, -0.125717, 0.09212133,
                              0.00289174, 0.27512265, 0.19977999, 0.32625175,
                              0.11545165, 0.2473364, 0.48727129, 0.22696133,
                              -0.29905336, 0.01784677, 0.44397951, 0.12411839,
                              0.24461395, 0.15557853]]])
    # Each row: 4 box corner values followed by 7 per-class scores.
    expected = np.array([[[0.6352768, 0.4511522, 0.3282481, 0.399001,
                           0.3442388, 0.30600947, 0.31275627, 0.25471213,
                           0.37388256, 0.3837296, 0.4053815],
                          [-0.08030513, 0.03847902, -0.1204156, 0.15814908,
                           0.21471846, 0.3685351, 0.34551924, 0.2827858,
                           0.28175643, 0.2476133, 0.2768552],
                          [0.47748056, -0.6927845, -0.02064419, -0.23614697,
                           0.25708547, 0.22602433, 0.23522456, 0.22463313,
                           0.20118774, 0.27247456, 0.19056945],
                          [0.16066766, -0.16758, 0.09968942, 0.22190909,
                           0.39703092, 0.3980631, 0.34837776, 0.2858115,
                           0.27523145, 0.3056678, 0.23813948],
                          [-0.3900978, 0.09533787, -0.26890844, 0.39753762,
                           0.3174772, 0.24648973, 0.32920435, 0.2524312,
                           0.19858934, 0.21420556, 0.28272408],
                          [-0.23613298, 0.24853289, -0.39098775, 0.08969437,
                           0.25800774, 0.27375397, 0.3134377, 0.27558005,
                           0.16002087, 0.29386497, 0.25043696],
                          [0.20180187, -0.7533715, -0.09654567, -0.24473873,
                           0.32985377, 0.23780398, 0.21593297, 0.21540776,
                           0.22579597, 0.2913851, 0.23050907],
                          [0.2593441, 0.18039277, 0.29542428, -0.2105855,
                           0.19831222, 0.11625554, 0.18273215, 0.18278787,
                           0.21034616, 0.16327505, 0.15885517],
                          [-0.43725252, 0.33974332, -0.06859337, 0.35533237,
                           0.25326118, 0.2328842, 0.24426049, 0.28761938,
                           0.2566472, 0.28660387, 0.244294],
                          [-0.00450347, 0.6323684, 0.11346802, 0.49126607,
                           0.34473014, 0.26376066, 0.31249547, 0.37738135,
                           0.32892877, 0.34742627, 0.33377704]]])
    pred = model.predict(encoded_val)
    assert np.max(np.abs(pred - expected)) < 1e-5
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/layers/tests/test_decode_layer.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/yolo_v3/proto/training_config.proto
# NOTE(review): protoc-generated module defining the TrainingConfig message of
# the YOLO v3 experiment spec. To change anything here, edit the .proto source
# and regenerate; hand edits will be lost and can corrupt the descriptors.
import sys
# Py2/Py3 shim: the serialized descriptor below is stored as latin-1 text and
# must be re-encoded to bytes on Python 3.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# Descriptors for the message-typed fields come from these sibling generated modules.
from nvidia_tao_tf1.cv.common.proto import cost_scaling_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_cost__scaling__config__pb2
from nvidia_tao_tf1.cv.common.proto import learning_rate_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_learning__rate__config__pb2
from nvidia_tao_tf1.cv.common.proto import optimizer_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_optimizer__config__pb2
from nvidia_tao_tf1.cv.common.proto import regularizer_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_regularizer__config__pb2
from nvidia_tao_tf1.cv.common.proto import visualizer_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_visualizer__config__pb2
# File-level descriptor, rebuilt at import time from the serialized
# FileDescriptorProto bytes emitted by protoc (serialized_pb below).
DESCRIPTOR = _descriptor.FileDescriptor(
  name='nvidia_tao_tf1/cv/yolo_v3/proto/training_config.proto',
  package='',
  syntax='proto3',
  serialized_options=None,
  serialized_pb=_b('\n5nvidia_tao_tf1/cv/yolo_v3/proto/training_config.proto\x1a\x38nvidia_tao_tf1/cv/common/proto/cost_scaling_config.proto\x1a\x39nvidia_tao_tf1/cv/common/proto/learning_rate_config.proto\x1a\x35nvidia_tao_tf1/cv/common/proto/optimizer_config.proto\x1a\x37nvidia_tao_tf1/cv/common/proto/regularizer_config.proto\x1a\x36nvidia_tao_tf1/cv/common/proto/visualizer_config.proto\"\xeb\x03\n\x0eTrainingConfig\x12\x1a\n\x12\x62\x61tch_size_per_gpu\x18\x01 \x01(\r\x12\x12\n\nnum_epochs\x18\x02 \x01(\r\x12*\n\rlearning_rate\x18\x03 \x01(\x0b\x32\x13.LearningRateConfig\x12\'\n\x0bregularizer\x18\x04 \x01(\x0b\x32\x12.RegularizerConfig\x12#\n\toptimizer\x18\x05 \x01(\x0b\x32\x10.OptimizerConfig\x12(\n\x0c\x63ost_scaling\x18\x06 \x01(\x0b\x32\x12.CostScalingConfig\x12\x1b\n\x13\x63heckpoint_interval\x18\x07 \x01(\r\x12\x12\n\nenable_qat\x18\x08 \x01(\x08\x12\x1b\n\x11resume_model_path\x18\t \x01(\tH\x00\x12\x1d\n\x13pretrain_model_path\x18\n \x01(\tH\x00\x12\x1b\n\x11pruned_model_path\x18\x0b \x01(\tH\x00\x12\x16\n\x0emax_queue_size\x18\x0c \x01(\r\x12\x11\n\tn_workers\x18\r \x01(\r\x12\x1b\n\x13use_multiprocessing\x18\x0e \x01(\x08\x12%\n\nvisualizer\x18\x0f \x01(\x0b\x32\x11.VisualizerConfigB\x0c\n\nload_modelb\x06proto3')
  ,
  dependencies=[nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_cost__scaling__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_learning__rate__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_optimizer__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_regularizer__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_visualizer__config__pb2.DESCRIPTOR,])
# Descriptor for the TrainingConfig message: one FieldDescriptor per field in
# the .proto (field numbers, wire types and defaults are fixed by protoc).
_TRAININGCONFIG = _descriptor.Descriptor(
  name='TrainingConfig',
  full_name='TrainingConfig',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='batch_size_per_gpu', full_name='TrainingConfig.batch_size_per_gpu', index=0,
      number=1, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='num_epochs', full_name='TrainingConfig.num_epochs', index=1,
      number=2, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='learning_rate', full_name='TrainingConfig.learning_rate', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='regularizer', full_name='TrainingConfig.regularizer', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='optimizer', full_name='TrainingConfig.optimizer', index=4,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='cost_scaling', full_name='TrainingConfig.cost_scaling', index=5,
      number=6, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='checkpoint_interval', full_name='TrainingConfig.checkpoint_interval', index=6,
      number=7, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='enable_qat', full_name='TrainingConfig.enable_qat', index=7,
      number=8, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='resume_model_path', full_name='TrainingConfig.resume_model_path', index=8,
      number=9, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='pretrain_model_path', full_name='TrainingConfig.pretrain_model_path', index=9,
      number=10, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='pruned_model_path', full_name='TrainingConfig.pruned_model_path', index=10,
      number=11, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='max_queue_size', full_name='TrainingConfig.max_queue_size', index=11,
      number=12, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='n_workers', full_name='TrainingConfig.n_workers', index=12,
      number=13, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='use_multiprocessing', full_name='TrainingConfig.use_multiprocessing', index=13,
      number=14, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='visualizer', full_name='TrainingConfig.visualizer', index=14,
      number=15, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
    _descriptor.OneofDescriptor(
      name='load_model', full_name='TrainingConfig.load_model',
      index=0, containing_type=None, fields=[]),
  ],
  serialized_start=343,
  serialized_end=834,
)
# Resolve the message-typed fields to the descriptors imported above.
_TRAININGCONFIG.fields_by_name['learning_rate'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_learning__rate__config__pb2._LEARNINGRATECONFIG
_TRAININGCONFIG.fields_by_name['regularizer'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_regularizer__config__pb2._REGULARIZERCONFIG
_TRAININGCONFIG.fields_by_name['optimizer'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_optimizer__config__pb2._OPTIMIZERCONFIG
_TRAININGCONFIG.fields_by_name['cost_scaling'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_cost__scaling__config__pb2._COSTSCALINGCONFIG
_TRAININGCONFIG.fields_by_name['visualizer'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_visualizer__config__pb2._VISUALIZERCONFIG
# The three model-path fields are mutually exclusive members of the
# 'load_model' oneof: setting one clears the others.
_TRAININGCONFIG.oneofs_by_name['load_model'].fields.append(
  _TRAININGCONFIG.fields_by_name['resume_model_path'])
_TRAININGCONFIG.fields_by_name['resume_model_path'].containing_oneof = _TRAININGCONFIG.oneofs_by_name['load_model']
_TRAININGCONFIG.oneofs_by_name['load_model'].fields.append(
  _TRAININGCONFIG.fields_by_name['pretrain_model_path'])
_TRAININGCONFIG.fields_by_name['pretrain_model_path'].containing_oneof = _TRAININGCONFIG.oneofs_by_name['load_model']
_TRAININGCONFIG.oneofs_by_name['load_model'].fields.append(
  _TRAININGCONFIG.fields_by_name['pruned_model_path'])
_TRAININGCONFIG.fields_by_name['pruned_model_path'].containing_oneof = _TRAININGCONFIG.oneofs_by_name['load_model']
DESCRIPTOR.message_types_by_name['TrainingConfig'] = _TRAININGCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# The concrete TrainingConfig message class is synthesized by the protobuf
# metaclass from the descriptor; methods/attributes come from reflection.
TrainingConfig = _reflection.GeneratedProtocolMessageType('TrainingConfig', (_message.Message,), dict(
  DESCRIPTOR = _TRAININGCONFIG,
  __module__ = 'nvidia_tao_tf1.cv.yolo_v3.proto.training_config_pb2'
  # @@protoc_insertion_point(class_scope:TrainingConfig)
  ))
_sym_db.RegisterMessage(TrainingConfig)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/proto/training_config_pb2.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/proto/__init__.py |
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/yolo_v3/proto/augmentation_config.proto
# NOTE(review): protoc-generated module defining the AugmentationConfig message
# of the YOLO v3 experiment spec. Edit the .proto source and regenerate; do not
# modify this file by hand.
import sys
# Py2/Py3 shim: the serialized descriptor below is stored as latin-1 text and
# must be re-encoded to bytes on Python 3.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# File-level descriptor, rebuilt at import time from the serialized
# FileDescriptorProto bytes emitted by protoc (serialized_pb below).
DESCRIPTOR = _descriptor.FileDescriptor(
  name='nvidia_tao_tf1/cv/yolo_v3/proto/augmentation_config.proto',
  package='',
  syntax='proto3',
  serialized_options=None,
  serialized_pb=_b('\n9nvidia_tao_tf1/cv/yolo_v3/proto/augmentation_config.proto\"\xf2\x02\n\x12\x41ugmentationConfig\x12\x0b\n\x03hue\x18\x01 \x01(\x02\x12\x12\n\nsaturation\x18\x02 \x01(\x02\x12\x10\n\x08\x65xposure\x18\x03 \x01(\x02\x12\x15\n\rvertical_flip\x18\x04 \x01(\x02\x12\x17\n\x0fhorizontal_flip\x18\x05 \x01(\x02\x12\x0e\n\x06jitter\x18\x06 \x01(\x02\x12\x14\n\x0coutput_width\x18\x07 \x01(\x05\x12\x15\n\routput_height\x18\x08 \x01(\x05\x12\x16\n\x0eoutput_channel\x18\t \x01(\x05\x12\x14\n\x0coutput_depth\x18\x0c \x01(\r\x12$\n\x1crandomize_input_shape_period\x18\n \x01(\x05\x12\x36\n\nimage_mean\x18\x0b \x03(\x0b\x32\".AugmentationConfig.ImageMeanEntry\x1a\x30\n\x0eImageMeanEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x02:\x02\x38\x01\x62\x06proto3')
)
# Synthetic map-entry message backing the map<string, float> 'image_mean'
# field (serialized_options '8\001' marks it as a map entry).
_AUGMENTATIONCONFIG_IMAGEMEANENTRY = _descriptor.Descriptor(
  name='ImageMeanEntry',
  full_name='AugmentationConfig.ImageMeanEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='AugmentationConfig.ImageMeanEntry.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='value', full_name='AugmentationConfig.ImageMeanEntry.value', index=1,
      number=2, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=_b('8\001'),
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=384,
  serialized_end=432,
)
# Descriptor for the AugmentationConfig message: one FieldDescriptor per field
# in the .proto (field numbers, wire types and defaults are fixed by protoc).
_AUGMENTATIONCONFIG = _descriptor.Descriptor(
  name='AugmentationConfig',
  full_name='AugmentationConfig',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='hue', full_name='AugmentationConfig.hue', index=0,
      number=1, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='saturation', full_name='AugmentationConfig.saturation', index=1,
      number=2, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='exposure', full_name='AugmentationConfig.exposure', index=2,
      number=3, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='vertical_flip', full_name='AugmentationConfig.vertical_flip', index=3,
      number=4, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='horizontal_flip', full_name='AugmentationConfig.horizontal_flip', index=4,
      number=5, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='jitter', full_name='AugmentationConfig.jitter', index=5,
      number=6, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='output_width', full_name='AugmentationConfig.output_width', index=6,
      number=7, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='output_height', full_name='AugmentationConfig.output_height', index=7,
      number=8, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='output_channel', full_name='AugmentationConfig.output_channel', index=8,
      number=9, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='output_depth', full_name='AugmentationConfig.output_depth', index=9,
      number=12, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='randomize_input_shape_period', full_name='AugmentationConfig.randomize_input_shape_period', index=10,
      number=10, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='image_mean', full_name='AugmentationConfig.image_mean', index=11,
      number=11, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[_AUGMENTATIONCONFIG_IMAGEMEANENTRY, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=62,
  serialized_end=432,
)
# Wire the nested map-entry type to its parent and to the 'image_mean' field.
_AUGMENTATIONCONFIG_IMAGEMEANENTRY.containing_type = _AUGMENTATIONCONFIG
_AUGMENTATIONCONFIG.fields_by_name['image_mean'].message_type = _AUGMENTATIONCONFIG_IMAGEMEANENTRY
DESCRIPTOR.message_types_by_name['AugmentationConfig'] = _AUGMENTATIONCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete message classes (including the nested map entry) are synthesized by
# the protobuf metaclass from the descriptors above.
AugmentationConfig = _reflection.GeneratedProtocolMessageType('AugmentationConfig', (_message.Message,), dict(
  ImageMeanEntry = _reflection.GeneratedProtocolMessageType('ImageMeanEntry', (_message.Message,), dict(
    DESCRIPTOR = _AUGMENTATIONCONFIG_IMAGEMEANENTRY,
    __module__ = 'nvidia_tao_tf1.cv.yolo_v3.proto.augmentation_config_pb2'
    # @@protoc_insertion_point(class_scope:AugmentationConfig.ImageMeanEntry)
    ))
  ,
  DESCRIPTOR = _AUGMENTATIONCONFIG,
  __module__ = 'nvidia_tao_tf1.cv.yolo_v3.proto.augmentation_config_pb2'
  # @@protoc_insertion_point(class_scope:AugmentationConfig)
  ))
_sym_db.RegisterMessage(AugmentationConfig)
_sym_db.RegisterMessage(AugmentationConfig.ImageMeanEntry)
_AUGMENTATIONCONFIG_IMAGEMEANENTRY._options = None
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/proto/augmentation_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/yolo_v3/proto/yolov3_config.proto
# NOTE(review): protoc-generated module defining the YOLOv3Config message
# (anchors, backbone arch, loss weights, freeze options) of the experiment
# spec. Edit the .proto source and regenerate; do not modify by hand.
import sys
# Py2/Py3 shim: the serialized descriptor below is stored as latin-1 text and
# must be re-encoded to bytes on Python 3.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# File-level descriptor, rebuilt at import time from the serialized
# FileDescriptorProto bytes emitted by protoc (serialized_pb below).
DESCRIPTOR = _descriptor.FileDescriptor(
  name='nvidia_tao_tf1/cv/yolo_v3/proto/yolov3_config.proto',
  package='',
  syntax='proto3',
  serialized_options=None,
  serialized_pb=_b('\n3nvidia_tao_tf1/cv/yolo_v3/proto/yolov3_config.proto\"\xca\x02\n\x0cYOLOv3Config\x12\x18\n\x10\x62ig_anchor_shape\x18\x01 \x01(\t\x12\x18\n\x10mid_anchor_shape\x18\x02 \x01(\t\x12\x1a\n\x12small_anchor_shape\x18\x03 \x01(\t\x12 \n\x18matching_neutral_box_iou\x18\x04 \x01(\x02\x12\x0c\n\x04\x61rch\x18\x05 \x01(\t\x12\x0f\n\x07nlayers\x18\x06 \x01(\r\x12\x18\n\x10\x61rch_conv_blocks\x18\x07 \x01(\r\x12\x17\n\x0floss_loc_weight\x18\x08 \x01(\x02\x12\x1c\n\x14loss_neg_obj_weights\x18\t \x01(\x02\x12\x1a\n\x12loss_class_weights\x18\n \x01(\x02\x12\x15\n\rfreeze_blocks\x18\x0b \x03(\x02\x12\x11\n\tfreeze_bn\x18\x0c \x01(\x08\x12\x12\n\nforce_relu\x18\r \x01(\x08\x62\x06proto3')
)
# Descriptor for the YOLOv3Config message: one FieldDescriptor per field in
# the .proto (field numbers, wire types and defaults are fixed by protoc).
_YOLOV3CONFIG = _descriptor.Descriptor(
  name='YOLOv3Config',
  full_name='YOLOv3Config',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='big_anchor_shape', full_name='YOLOv3Config.big_anchor_shape', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='mid_anchor_shape', full_name='YOLOv3Config.mid_anchor_shape', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='small_anchor_shape', full_name='YOLOv3Config.small_anchor_shape', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='matching_neutral_box_iou', full_name='YOLOv3Config.matching_neutral_box_iou', index=3,
      number=4, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='arch', full_name='YOLOv3Config.arch', index=4,
      number=5, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='nlayers', full_name='YOLOv3Config.nlayers', index=5,
      number=6, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='arch_conv_blocks', full_name='YOLOv3Config.arch_conv_blocks', index=6,
      number=7, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='loss_loc_weight', full_name='YOLOv3Config.loss_loc_weight', index=7,
      number=8, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='loss_neg_obj_weights', full_name='YOLOv3Config.loss_neg_obj_weights', index=8,
      number=9, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='loss_class_weights', full_name='YOLOv3Config.loss_class_weights', index=9,
      number=10, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='freeze_blocks', full_name='YOLOv3Config.freeze_blocks', index=10,
      number=11, type=2, cpp_type=6, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='freeze_bn', full_name='YOLOv3Config.freeze_bn', index=11,
      number=12, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='force_relu', full_name='YOLOv3Config.force_relu', index=12,
      number=13, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=56,
  serialized_end=386,
)
DESCRIPTOR.message_types_by_name['YOLOv3Config'] = _YOLOV3CONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# The concrete YOLOv3Config message class is synthesized by the protobuf
# metaclass from the descriptor above.
YOLOv3Config = _reflection.GeneratedProtocolMessageType('YOLOv3Config', (_message.Message,), dict(
  DESCRIPTOR = _YOLOV3CONFIG,
  __module__ = 'nvidia_tao_tf1.cv.yolo_v3.proto.yolov3_config_pb2'
  # @@protoc_insertion_point(class_scope:YOLOv3Config)
  ))
_sym_db.RegisterMessage(YOLOv3Config)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/proto/yolov3_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/yolo_v3/proto/experiment.proto
# NOTE(review): protoc-generated module defining the top-level Experiment
# message that aggregates the dataset, augmentation, training, eval, NMS and
# YOLOv3 configs. Edit the .proto source and regenerate; do not modify by hand.
import sys
# Py2/Py3 shim: the serialized descriptor below is stored as latin-1 text and
# must be re-encoded to bytes on Python 3.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# Descriptors for the message-typed fields come from these sibling generated modules.
from nvidia_tao_tf1.cv.yolo_v3.proto import training_config_pb2 as nvidia__tao__tf1_dot_cv_dot_yolo__v3_dot_proto_dot_training__config__pb2
from nvidia_tao_tf1.cv.common.proto import eval_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_eval__config__pb2
from nvidia_tao_tf1.cv.common.proto import nms_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_nms__config__pb2
from nvidia_tao_tf1.cv.yolo_v3.proto import yolov3_config_pb2 as nvidia__tao__tf1_dot_cv_dot_yolo__v3_dot_proto_dot_yolov3__config__pb2
from nvidia_tao_tf1.cv.yolo_v3.proto import augmentation_config_pb2 as nvidia__tao__tf1_dot_cv_dot_yolo__v3_dot_proto_dot_augmentation__config__pb2
from nvidia_tao_tf1.cv.yolo_v3.proto import dataset_config_pb2 as nvidia__tao__tf1_dot_cv_dot_yolo__v3_dot_proto_dot_dataset__config__pb2
# File-level descriptor, rebuilt at import time from the serialized
# FileDescriptorProto bytes emitted by protoc (serialized_pb below).
DESCRIPTOR = _descriptor.FileDescriptor(
  name='nvidia_tao_tf1/cv/yolo_v3/proto/experiment.proto',
  package='',
  syntax='proto3',
  serialized_options=None,
  serialized_pb=_b('\n0nvidia_tao_tf1/cv/yolo_v3/proto/experiment.proto\x1a\x35nvidia_tao_tf1/cv/yolo_v3/proto/training_config.proto\x1a\x30nvidia_tao_tf1/cv/common/proto/eval_config.proto\x1a/nvidia_tao_tf1/cv/common/proto/nms_config.proto\x1a\x33nvidia_tao_tf1/cv/yolo_v3/proto/yolov3_config.proto\x1a\x39nvidia_tao_tf1/cv/yolo_v3/proto/augmentation_config.proto\x1a\x34nvidia_tao_tf1/cv/yolo_v3/proto/dataset_config.proto\"\x93\x02\n\nExperiment\x12,\n\x0e\x64\x61taset_config\x18\x01 \x01(\x0b\x32\x14.YOLOv3DatasetConfig\x12\x30\n\x13\x61ugmentation_config\x18\x02 \x01(\x0b\x32\x13.AugmentationConfig\x12(\n\x0ftraining_config\x18\x03 \x01(\x0b\x32\x0f.TrainingConfig\x12 \n\x0b\x65val_config\x18\x04 \x01(\x0b\x32\x0b.EvalConfig\x12\x1e\n\nnms_config\x18\x05 \x01(\x0b\x32\n.NMSConfig\x12$\n\ryolov3_config\x18\x06 \x01(\x0b\x32\r.YOLOv3Config\x12\x13\n\x0brandom_seed\x18\x07 \x01(\rb\x06proto3')
  ,
  dependencies=[nvidia__tao__tf1_dot_cv_dot_yolo__v3_dot_proto_dot_training__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_eval__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_nms__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_yolo__v3_dot_proto_dot_yolov3__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_yolo__v3_dot_proto_dot_augmentation__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_yolo__v3_dot_proto_dot_dataset__config__pb2.DESCRIPTOR,])
# Descriptor for the Experiment message: one FieldDescriptor per field in the
# .proto; six sub-config messages plus a scalar random_seed.
_EXPERIMENT = _descriptor.Descriptor(
  name='Experiment',
  full_name='Experiment',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='dataset_config', full_name='Experiment.dataset_config', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='augmentation_config', full_name='Experiment.augmentation_config', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='training_config', full_name='Experiment.training_config', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='eval_config', full_name='Experiment.eval_config', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='nms_config', full_name='Experiment.nms_config', index=4,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='yolov3_config', full_name='Experiment.yolov3_config', index=5,
      number=6, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='random_seed', full_name='Experiment.random_seed', index=6,
      number=7, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=373,
  serialized_end=648,
)
# Resolve the message-typed fields to the descriptors imported above.
_EXPERIMENT.fields_by_name['dataset_config'].message_type = nvidia__tao__tf1_dot_cv_dot_yolo__v3_dot_proto_dot_dataset__config__pb2._YOLOV3DATASETCONFIG
_EXPERIMENT.fields_by_name['augmentation_config'].message_type = nvidia__tao__tf1_dot_cv_dot_yolo__v3_dot_proto_dot_augmentation__config__pb2._AUGMENTATIONCONFIG
_EXPERIMENT.fields_by_name['training_config'].message_type = nvidia__tao__tf1_dot_cv_dot_yolo__v3_dot_proto_dot_training__config__pb2._TRAININGCONFIG
_EXPERIMENT.fields_by_name['eval_config'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_eval__config__pb2._EVALCONFIG
_EXPERIMENT.fields_by_name['nms_config'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_nms__config__pb2._NMSCONFIG
_EXPERIMENT.fields_by_name['yolov3_config'].message_type = nvidia__tao__tf1_dot_cv_dot_yolo__v3_dot_proto_dot_yolov3__config__pb2._YOLOV3CONFIG
DESCRIPTOR.message_types_by_name['Experiment'] = _EXPERIMENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# The concrete Experiment message class is synthesized by the protobuf
# metaclass from the descriptor above.
Experiment = _reflection.GeneratedProtocolMessageType('Experiment', (_message.Message,), dict(
  DESCRIPTOR = _EXPERIMENT,
  __module__ = 'nvidia_tao_tf1.cv.yolo_v3.proto.experiment_pb2'
  # @@protoc_insertion_point(class_scope:Experiment)
  ))
_sym_db.RegisterMessage(Experiment)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/proto/experiment_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/yolo_v3/proto/dataset_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/yolo_v3/proto/dataset_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n4nvidia_tao_tf1/cv/yolo_v3/proto/dataset_config.proto\"\xa5\x01\n\x10YOLOv3DataSource\x12\x1e\n\x14label_directory_path\x18\x01 \x01(\tH\x00\x12\x18\n\x0etfrecords_path\x18\x02 \x01(\tH\x00\x12\x1c\n\x14image_directory_path\x18\x03 \x01(\t\x12\x11\n\troot_path\x18\x04 \x01(\t\x12\x15\n\rsource_weight\x18\x05 \x01(\x02\x42\x0f\n\rlabels_format\"\xf7\x02\n\x13YOLOv3DatasetConfig\x12\'\n\x0c\x64\x61ta_sources\x18\x01 \x03(\x0b\x32\x11.YOLOv3DataSource\x12J\n\x14target_class_mapping\x18\x02 \x03(\x0b\x32,.YOLOv3DatasetConfig.TargetClassMappingEntry\x12\x17\n\x0fvalidation_fold\x18\x04 \x01(\r\x12\x32\n\x17validation_data_sources\x18\x03 \x03(\x0b\x32\x11.YOLOv3DataSource\x12%\n\x1dinclude_difficult_in_training\x18\x07 \x01(\x08\x12\x0c\n\x04type\x18\x05 \x01(\t\x12\x17\n\x0fimage_extension\x18\x06 \x01(\t\x12\x15\n\ris_monochrome\x18\x08 \x01(\x08\x1a\x39\n\x17TargetClassMappingEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x62\x06proto3')
)
_YOLOV3DATASOURCE = _descriptor.Descriptor(
name='YOLOv3DataSource',
full_name='YOLOv3DataSource',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='label_directory_path', full_name='YOLOv3DataSource.label_directory_path', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tfrecords_path', full_name='YOLOv3DataSource.tfrecords_path', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_directory_path', full_name='YOLOv3DataSource.image_directory_path', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='root_path', full_name='YOLOv3DataSource.root_path', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='source_weight', full_name='YOLOv3DataSource.source_weight', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='labels_format', full_name='YOLOv3DataSource.labels_format',
index=0, containing_type=None, fields=[]),
],
serialized_start=57,
serialized_end=222,
)
_YOLOV3DATASETCONFIG_TARGETCLASSMAPPINGENTRY = _descriptor.Descriptor(
name='TargetClassMappingEntry',
full_name='YOLOv3DatasetConfig.TargetClassMappingEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='YOLOv3DatasetConfig.TargetClassMappingEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='YOLOv3DatasetConfig.TargetClassMappingEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=543,
serialized_end=600,
)
_YOLOV3DATASETCONFIG = _descriptor.Descriptor(
name='YOLOv3DatasetConfig',
full_name='YOLOv3DatasetConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='data_sources', full_name='YOLOv3DatasetConfig.data_sources', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='target_class_mapping', full_name='YOLOv3DatasetConfig.target_class_mapping', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='validation_fold', full_name='YOLOv3DatasetConfig.validation_fold', index=2,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='validation_data_sources', full_name='YOLOv3DatasetConfig.validation_data_sources', index=3,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='include_difficult_in_training', full_name='YOLOv3DatasetConfig.include_difficult_in_training', index=4,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='YOLOv3DatasetConfig.type', index=5,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_extension', full_name='YOLOv3DatasetConfig.image_extension', index=6,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_monochrome', full_name='YOLOv3DatasetConfig.is_monochrome', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_YOLOV3DATASETCONFIG_TARGETCLASSMAPPINGENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=225,
serialized_end=600,
)
_YOLOV3DATASOURCE.oneofs_by_name['labels_format'].fields.append(
_YOLOV3DATASOURCE.fields_by_name['label_directory_path'])
_YOLOV3DATASOURCE.fields_by_name['label_directory_path'].containing_oneof = _YOLOV3DATASOURCE.oneofs_by_name['labels_format']
_YOLOV3DATASOURCE.oneofs_by_name['labels_format'].fields.append(
_YOLOV3DATASOURCE.fields_by_name['tfrecords_path'])
_YOLOV3DATASOURCE.fields_by_name['tfrecords_path'].containing_oneof = _YOLOV3DATASOURCE.oneofs_by_name['labels_format']
_YOLOV3DATASETCONFIG_TARGETCLASSMAPPINGENTRY.containing_type = _YOLOV3DATASETCONFIG
_YOLOV3DATASETCONFIG.fields_by_name['data_sources'].message_type = _YOLOV3DATASOURCE
_YOLOV3DATASETCONFIG.fields_by_name['target_class_mapping'].message_type = _YOLOV3DATASETCONFIG_TARGETCLASSMAPPINGENTRY
_YOLOV3DATASETCONFIG.fields_by_name['validation_data_sources'].message_type = _YOLOV3DATASOURCE
DESCRIPTOR.message_types_by_name['YOLOv3DataSource'] = _YOLOV3DATASOURCE
DESCRIPTOR.message_types_by_name['YOLOv3DatasetConfig'] = _YOLOV3DATASETCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
YOLOv3DataSource = _reflection.GeneratedProtocolMessageType('YOLOv3DataSource', (_message.Message,), dict(
DESCRIPTOR = _YOLOV3DATASOURCE,
__module__ = 'nvidia_tao_tf1.cv.yolo_v3.proto.dataset_config_pb2'
# @@protoc_insertion_point(class_scope:YOLOv3DataSource)
))
_sym_db.RegisterMessage(YOLOv3DataSource)
YOLOv3DatasetConfig = _reflection.GeneratedProtocolMessageType('YOLOv3DatasetConfig', (_message.Message,), dict(
TargetClassMappingEntry = _reflection.GeneratedProtocolMessageType('TargetClassMappingEntry', (_message.Message,), dict(
DESCRIPTOR = _YOLOV3DATASETCONFIG_TARGETCLASSMAPPINGENTRY,
__module__ = 'nvidia_tao_tf1.cv.yolo_v3.proto.dataset_config_pb2'
# @@protoc_insertion_point(class_scope:YOLOv3DatasetConfig.TargetClassMappingEntry)
))
,
DESCRIPTOR = _YOLOV3DATASETCONFIG,
__module__ = 'nvidia_tao_tf1.cv.yolo_v3.proto.dataset_config_pb2'
# @@protoc_insertion_point(class_scope:YOLOv3DatasetConfig)
))
_sym_db.RegisterMessage(YOLOv3DatasetConfig)
_sym_db.RegisterMessage(YOLOv3DatasetConfig.TargetClassMappingEntry)
_YOLOV3DATASETCONFIG_TARGETCLASSMAPPINGENTRY._options = None
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/proto/dataset_config_pb2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''build model for training.'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def _load_pretrain_weights(pretrain_model, train_model):
    """Load weights in pretrain model to model.

    Weights are copied layer-by-layer by name. Layers that appear before the
    first YOLO head layer (name containing 'yolo_') must match exactly;
    once a YOLO head layer is seen, missing or shape-mismatched layers are
    silently skipped.
    """
    strict_mode = True
    for target_layer in train_model.layers[1:]:
        # Everything from the first yolo_* layer onwards is head-specific and
        # is allowed to differ from the pretrained backbone.
        if 'yolo_' in target_layer.name:
            strict_mode = False
        try:
            source_layer = pretrain_model.get_layer(target_layer.name)
        except ValueError:
            # QDQ (quantize/dequantize) layers and weight-less layers are
            # always allowed to be absent from the pretrained model.
            if strict_mode and target_layer.name[-3:] != 'qdq' \
                    and len(target_layer.get_weights()) != 0:
                raise ValueError(target_layer.name + ' not found in pretrained model.')
            continue
        try:
            target_layer.set_weights(source_layer.get_weights())
        except ValueError:
            if strict_mode:
                raise ValueError(target_layer.name + ' has incorrect shape in pretrained model.')
            continue
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/builders/model_builder.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/builders/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''build model for evaluation.'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.models import Model
from nvidia_tao_tf1.cv.yolo_v3.layers.decode_layer import YOLODecodeLayer
from nvidia_tao_tf1.cv.yolo_v3.layers.nms_layer import NMSLayer
def build(training_model,
          confidence_thresh=0.05,
          iou_threshold=0.5,
          top_k=200,
          include_encoded_head=False,
          nms_on_cpu=False):
    '''
    build model for evaluation.

    Args:
        training_model: Keras model built from model_builder. Last layer is encoded prediction.
        confidence_thresh: ignore all boxes with confidence less than this threshold.
        iou_threshold: IoU threshold for NMS.
        top_k: pick top k boxes with highest confidence scores after NMS.
        include_encoded_head: whether to include original model output into final model output.
        nms_on_cpu(bool): force NMS to run on CPU (GPU NMS is flaky with tfrecord dataset).

    Returns:
        eval_model: keras model that outputs at most top_k detection boxes.
    '''
    encoded_head = training_model.layers[-1].output
    # Decode the raw grid-encoded predictions into box coordinates + scores.
    decoded = YOLODecodeLayer(name='decoded_predictions')(encoded_head)
    # Suppress overlapping boxes and keep the top_k highest-scoring ones.
    detections = NMSLayer(output_size=top_k,
                          iou_threshold=iou_threshold,
                          score_threshold=confidence_thresh,
                          force_on_cpu=nms_on_cpu,
                          name="NMS")(decoded)
    if include_encoded_head:
        outputs = [encoded_head, detections]
    else:
        outputs = detections
    return Model(inputs=training_model.layers[0].input,
                 outputs=outputs)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/builders/eval_builder.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/utils/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper function to load model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import keras
from nvidia_tao_tf1.cv.common.utils import CUSTOM_OBJS
from nvidia_tao_tf1.cv.common.utils import load_keras_model
from nvidia_tao_tf1.cv.yolo_v3.losses.yolo_loss import YOLOv3Loss
from nvidia_tao_tf1.encoding import encoding
def get_model_with_input(model_path, input_layer):
    """Implement a trick to replace input tensor."""
    def _input_layer_factory(*args, **kwargs):
        # Ignore the serialized InputLayer config entirely and hand back the
        # caller-provided layer, effectively swapping the model's input.
        return input_layer
    custom_objects = dict(CUSTOM_OBJS)
    custom_objects['InputLayer'] = _input_layer_factory
    return keras.models.load_model(model_path, custom_objects=custom_objects)
def load_model(model_path, experiment_spec=None, input_shape=None, key=None):
    """Load a model either in .tlt format or .hdf5 format.

    Args:
        model_path (str): path to a .hdf5 or encrypted .tlt model file.
        experiment_spec: experiment spec proto; used to rebuild the YOLO loss
            custom object when loading a .hdf5 model.
        input_shape (tuple, optional): when given, the model is reloaded with a
            new InputLayer of this shape.
        key (str): encryption key used to decode a .tlt model.

    Returns:
        The loaded keras model.

    Raises:
        NotImplementedError: if the file extension is not .hdf5 or .tlt.
    """
    _, ext = os.path.splitext(model_path)
    if ext == '.hdf5':
        yololoss = YOLOv3Loss(experiment_spec.yolov3_config.loss_loc_weight,
                              experiment_spec.yolov3_config.loss_neg_obj_weights,
                              experiment_spec.yolov3_config.loss_class_weights,
                              experiment_spec.yolov3_config.matching_neutral_box_iou)
        CUSTOM_OBJS['compute_loss'] = yololoss.compute_loss
        # directly load model, add dummy loss since loss is never required.
        if input_shape is None:
            # load the model to get img width/height
            model = load_keras_model(model_path,
                                     custom_objects=CUSTOM_OBJS)
        else:
            input_layer = keras.layers.InputLayer(input_shape=input_shape, name="Input")
            model = get_model_with_input(model_path, input_layer)
    elif ext == '.tlt':
        os_handle, temp_file_name = tempfile.mkstemp(suffix='.hdf5')
        os.close(os_handle)
        # Decrypt the .tlt payload into a temporary .hdf5 file. The `with`
        # statement closes both files; no explicit close() calls are needed.
        with open(temp_file_name, 'wb') as temp_file, open(model_path, 'rb') as encoded_file:
            encoding.decode(encoded_file, temp_file, key)
        # recursive call on the decrypted .hdf5 file
        model = load_model(temp_file_name, experiment_spec, input_shape, None)
        os.remove(temp_file_name)
    else:
        raise NotImplementedError("{0} file is not supported!".format(ext))
    return model
def save_model(keras_model, model_path, key, save_format=None):
    """Save a model to disk.

    Args:
        keras_model: model to serialize.
        model_path (str): destination path.
        key (str): encryption key (unused for plain .hdf5 output).
        save_format (str, optional): desired extension (e.g. '.hdf5'); when it
            differs from the current extension of ``model_path``, it is
            appended and the save is retried on the corrected path.

    Returns:
        str: the path the model was actually written to.

    Raises:
        NotImplementedError: if the resulting extension is not .hdf5.
    """
    _, ext = os.path.splitext(model_path)
    if save_format is not None and save_format != ext:
        # Recurse with the requested format appended to the path.
        return save_model(keras_model, model_path + save_format, key, None)
    if ext != '.hdf5':
        raise NotImplementedError("{0} file is not supported!".format(ext))
    keras_model.save(model_path, overwrite=True, include_optimizer=True)
    return model_path
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/utils/model_io.py |
"""Tensor related utility functions."""
import tensorflow as tf
def get_init_ops():
    """Return all ops required for initialization.

    Groups the local-variable and table initializers together with every op
    registered under the 'iterator_init' graph collection.
    """
    init_ops = [
        tf.local_variables_initializer(),
        tf.tables_initializer(),
    ]
    init_ops += tf.get_collection('iterator_init')
    return tf.group(*init_ops)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/utils/tensor_utils.py |
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
"""Load an experiment spec file to run YOLOv3 training, evaluation, pruning."""
from google.protobuf.text_format import Merge as merge_text_proto
import nvidia_tao_tf1.cv.yolo_v3.proto.experiment_pb2 as experiment_pb2
def load_experiment_spec(spec_path=None):
    """Load experiment spec from a .txt file and return an experiment_pb2.Experiment object.

    Args:
        spec_path (str): location of a file containing the custom experiment spec proto.

    Returns:
        experiment_spec: protocol buffer instance of type experiment_pb2.Experiment.

    Raises:
        AssertionError: if any value in the spec is outside its valid range.
    """
    experiment_spec = experiment_pb2.Experiment()
    # Use a context manager so the spec file handle is always closed.
    with open(spec_path, "r") as spec_file:
        merge_text_proto(spec_file.read(), experiment_spec)
    # dataset_config
    assert len(experiment_spec.dataset_config.target_class_mapping.values()) > 0, \
        "Please specify target_class_mapping"
    data_sources = experiment_spec.dataset_config.data_sources
    assert len(data_sources) > 0, "Please specify training data sources"
    train_label_types = [
        s.WhichOneof("labels_format") for s in data_sources
    ]
    assert len(list(set(train_label_types))) == 1, (
        "Label format should be identical for all training data sources. Got {}".format(
            train_label_types
        )
    )
    if train_label_types[0] == "tfrecords_path":
        assert len(experiment_spec.dataset_config.image_extension) > 0, (
            "`image_extension` should be specified in `dataset_config` if training "
            " label format is TFRecord."
        )
    if len(experiment_spec.dataset_config.validation_data_sources) > 0:
        val_data_source = experiment_spec.dataset_config.validation_data_sources
        val_label_types = [
            s.WhichOneof("labels_format") for s in val_data_source
        ]
        assert len(list(set(val_label_types))) == 1, (
            "Label format should be identical for all validation data sources. Got {}".format(
                val_label_types
            )
        )
        if val_label_types[0] == "tfrecords_path":
            assert len(experiment_spec.dataset_config.image_extension) > 0, (
                "`image_extension` should be specified in `dataset_config` if validation "
                " label format is TFRecord."
            )
    else:
        # validation_fold splits the training TFRecords, so TFRecord training
        # labels are mandatory in that case.
        assert data_sources[0].WhichOneof("labels_format") == "tfrecords_path", (
            "Validation dataset specified by `validation_fold` requires the training label format "
            "to be TFRecords."
        )
    # augmentation config
    assert experiment_spec.augmentation_config.output_channel in [1, 3], \
        "output_channel must be either 1 or 3."
    img_mean = experiment_spec.augmentation_config.image_mean
    if experiment_spec.augmentation_config.output_channel == 3:
        if img_mean:
            assert all(c in img_mean for c in ['r', 'g', 'b']), (
                "'r', 'g', 'b' should all be present in image_mean "
                "for images with 3 channels."
            )
    else:
        if img_mean:
            assert 'l' in img_mean, (
                "'l' should be present in image_mean for images "
                "with 1 channel."
            )
    assert 0.0 <= experiment_spec.augmentation_config.hue <= 1.0, "hue must be within [0, 1]"
    assert experiment_spec.augmentation_config.saturation >= 1.0, "saturation must be at least 1.0"
    assert experiment_spec.augmentation_config.exposure >= 1.0, "exposure must be at least 1.0"
    assert 0.0 <= experiment_spec.augmentation_config.vertical_flip <= 1.0, \
        "vertical_flip must be within [0, 1]"
    assert 0.0 <= experiment_spec.augmentation_config.horizontal_flip <= 1.0, \
        "horizontal_flip must be within [0, 1]"
    assert 0.0 <= experiment_spec.augmentation_config.jitter <= 1.0, "jitter must be within [0, 1]"
    # The network downsamples by a factor of 32, so input dims must be
    # multiples of 32.
    assert experiment_spec.augmentation_config.output_width >= 32, "width must be at least 32"
    assert experiment_spec.augmentation_config.output_width % 32 == 0, \
        "width must be multiple of 32"
    assert experiment_spec.augmentation_config.output_height >= 32, "height must be at least 32"
    assert experiment_spec.augmentation_config.output_height % 32 == 0, \
        "height must be multiple of 32"
    assert experiment_spec.augmentation_config.randomize_input_shape_period >= 0, \
        "randomize_input_shape_period should be non-negative"
    # training config
    assert experiment_spec.training_config.batch_size_per_gpu > 0, "batch size must be positive"
    assert experiment_spec.training_config.num_epochs > 0, \
        "number of training epochs must be positive"
    assert experiment_spec.training_config.checkpoint_interval > 0, \
        "checkpoint interval must be positive"
    # eval config
    assert experiment_spec.eval_config.batch_size > 0, "batch size must be positive"
    assert 0.0 < experiment_spec.eval_config.matching_iou_threshold <= 1.0, \
        "matching_iou_threshold must be within (0, 1]"
    # nms config
    assert 0.0 < experiment_spec.nms_config.clustering_iou_threshold <= 1.0, \
        "clustering_iou_threshold must be within (0, 1]"
    # yolo_v3 config
    assert 0.0 < experiment_spec.yolov3_config.matching_neutral_box_iou < 1.0, \
        "matching_neutral_box_iou must be within (0, 1)"
    assert experiment_spec.yolov3_config.arch_conv_blocks in [0, 1, 2], \
        "arch_conv_blocks must be either 0, 1 or 2"
    assert experiment_spec.yolov3_config.loss_loc_weight >= 0.0, \
        "all loss weights must be non-negative"
    assert experiment_spec.yolov3_config.loss_neg_obj_weights >= 0.0, \
        "all loss weights must be non-negative"
    assert experiment_spec.yolov3_config.loss_class_weights >= 0.0, \
        "all loss weights must be non-negative"
    return experiment_spec
def validation_labels_format(spec):
    """The format of the labels of validation set.

    Returns "keras_sequence" when explicit validation data sources use
    label directories, otherwise "tfrecords".
    """
    val_sources = spec.dataset_config.validation_data_sources
    if len(val_sources) > 0:
        first_format = val_sources[0].WhichOneof("labels_format")
        if first_format == "label_directory_path":
            return "keras_sequence"
    return "tfrecords"
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/utils/spec_loader.py |
"""YOLO v3 class to build the model and pipelines."""
from contextlib import contextmanager
from math import ceil
from multiprocessing import cpu_count
import os
import shutil
import tempfile
import keras
from keras.backend import set_learning_phase
from keras.callbacks import TerminateOnNaN
from keras.layers import Input
from keras.models import Model
import numpy as np
import six
import tensorflow as tf
from nvidia_tao_tf1.cv.common.callbacks.detection_metric_callback import DetectionMetricCallback
from nvidia_tao_tf1.cv.common.callbacks.enc_model_saver_callback import KerasModelSaver
from nvidia_tao_tf1.cv.common.callbacks.loggers import TAOStatusLogger
from nvidia_tao_tf1.cv.common.evaluator.ap_evaluator import APEvaluator
from nvidia_tao_tf1.cv.common.utils import (
build_optimizer_from_config,
build_regularizer_from_config,
CUSTOM_OBJS,
TensorBoard
)
from nvidia_tao_tf1.cv.common.utils import OneIndexedCSVLogger as CSVLogger
from nvidia_tao_tf1.cv.common.utils import SoftStartAnnealingLearningRateScheduler as LRS
from nvidia_tao_tf1.cv.yolo_v3.architecture.yolo_arch import YOLO
from nvidia_tao_tf1.cv.yolo_v3.builders import eval_builder
from nvidia_tao_tf1.cv.yolo_v3.builders.model_builder import _load_pretrain_weights
from nvidia_tao_tf1.cv.yolo_v3.data_loader.data_loader import YOLOv3DataPipe
from nvidia_tao_tf1.cv.yolo_v3.data_loader.generate_shape_tensors import gen_random_shape_tensors
from nvidia_tao_tf1.cv.yolo_v3.losses.yolo_loss import YOLOv3Loss
from nvidia_tao_tf1.cv.yolo_v3.metric.yolov3_metric_callback import YOLOv3MetricCallback
from nvidia_tao_tf1.cv.yolo_v3.utils import model_io
from nvidia_tao_tf1.cv.yolo_v3.utils.model_io import get_model_with_input
from nvidia_tao_tf1.cv.yolo_v3.utils.spec_loader import validation_labels_format
from nvidia_tao_tf1.cv.yolo_v4.utils.fit_generator import fit_generator
@contextmanager
def patch_freeze_bn(freeze_bn):
    """context for patching BN to freeze it during model creation.

    While active (and only if ``freeze_bn`` is True), BatchNormalization.call
    is monkey-patched so that ``training`` defaults to False, keeping BN in
    inference mode (frozen statistics) for layers built inside this context.
    """
    def compose_call(prev_call_method):
        def call(self, inputs, training=False):
            # Defaulting training to False freezes BN unless a caller
            # explicitly passes training=True.
            return prev_call_method(self, inputs, training)
        return call
    prev_batchnorm_call = keras.layers.normalization.BatchNormalization.call
    if freeze_bn:
        keras.layers.normalization.BatchNormalization.call = compose_call(
            prev_batchnorm_call
        )
    try:
        yield
    finally:
        # Restore the original implementation even if model creation raised,
        # so the process-wide patch never leaks outside this context.
        if freeze_bn:
            keras.layers.normalization.BatchNormalization.call = prev_batchnorm_call
class YOLOv3Model(object):
"""YOLO v3 model."""
    def __init__(self, spec, key):
        """Initialize.

        Args:
            spec: loaded experiment spec proto (experiment_pb2.Experiment).
            key (str): encryption key used when loading/saving .tlt models.
        """
        self.spec = spec
        self.yolov3_config = spec.yolov3_config
        self.key = key
        # dataset classes
        self.class_mapping = spec.dataset_config.target_class_mapping
        # Deduplicated, lower-cased target class names in sorted order.
        self.classes = sorted({str(x).lower() for x in self.class_mapping.values()})
        self.n_classes = len(self.classes)
        # model architecture
        self.arch = spec.yolov3_config.arch
        self.arch_name = self.arch
        if self.arch_name in ['resnet', 'darknet', 'vgg']:
            # append nlayers into meta_arch_name
            self.arch_name += str(spec.yolov3_config.nlayers)
        self.nlayers = spec.yolov3_config.nlayers
        self.freeze_blocks = spec.yolov3_config.freeze_blocks
        self.freeze_bn = spec.yolov3_config.freeze_bn
        self.arch_conv_blocks = spec.yolov3_config.arch_conv_blocks
        self.force_relu = spec.yolov3_config.force_relu
        self.qat = spec.training_config.enable_qat
        # NMS config
        self.nms_confidence_thresh = spec.nms_config.confidence_threshold
        self.nms_iou_threshold = spec.nms_config.clustering_iou_threshold
        self.nms_top_k = spec.nms_config.top_k
        # If using TFRecords, force NMS on CPU
        self.nms_on_cpu = False
        if self.train_labels_format == "tfrecords" or self.val_labels_format == "tfrecords":
            self.nms_on_cpu = True
        # evaluation params
        self.ap_mode = spec.eval_config.average_precision_mode
        matching_iou = spec.eval_config.matching_iou_threshold
        # Unset (0) IoU threshold falls back to the conventional 0.5.
        self.matching_iou = matching_iou if matching_iou > 0 else 0.5
        self.ap_mode_dict = {0: "sample", 1: "integrate"}
        self.average_precision_mode = self.ap_mode_dict[self.ap_mode]
        # training
        self.training_config = spec.training_config
        self.use_mp = spec.training_config.use_multiprocessing
        # Unset worker count defaults to (CPU count - 1); unset queue size to 20.
        self.n_workers = spec.training_config.n_workers or (cpu_count()-1)
        self.max_queue_size = spec.training_config.max_queue_size or 20
        self.num_epochs = spec.training_config.num_epochs
        self.bs = spec.training_config.batch_size_per_gpu
        self.lrconfig = spec.training_config.learning_rate.soft_start_annealing_schedule
        self.ckpt_interval = spec.training_config.checkpoint_interval
        self.augmentation_config = spec.augmentation_config
        self.image_channels = int(self.augmentation_config.output_channel)
        self.image_width = int(self.augmentation_config.output_width)
        self.image_height = int(self.augmentation_config.output_height)
        self.shape_period = int(self.augmentation_config.randomize_input_shape_period)
        self.load_type = spec.training_config.WhichOneof('load_model')
        # Min/max scale factors for multi-scale training shapes
        # (used by generate_random_shape below).
        self.hmin_ratio = 0.6
        self.hmax_ratio = 1.5
        self.wmin_ratio = 0.6
        self.wmax_ratio = 1.5
        self.build_regularizer()
        self.generate_random_shape()
        # Fixed-size shape tensors; the _val suffix suggests these are for the
        # validation path (no multi-scale) -- confirm against consumers.
        self.h_tensor_val = tf.constant(
            self.image_height,
            dtype=tf.int32
        )
        self.w_tensor_val = tf.constant(
            self.image_width,
            dtype=tf.int32
        )
        self.get_val_fmap_stride()
        self.parse_init_epoch()
        # Populated later during model/pipeline construction.
        self.callbacks = []
        self.losses = None
        self.metrics = None
        self.optimizer = None
        self.target_tensors = None
        self.tb_callback = None
def generate_random_shape(self):
"""generate random shape for multi-scale training."""
if self.shape_period > 0:
self.h_tensor, self.w_tensor = gen_random_shape_tensors(
self.shape_period,
int(self.image_height * self.hmin_ratio),
int(self.image_height * self.hmax_ratio),
int(self.image_width * self.wmin_ratio),
int(self.image_width * self.wmax_ratio)
)
else:
self.h_tensor = tf.constant(
self.image_height,
dtype=tf.int32
)
self.w_tensor = tf.constant(
self.image_width,
dtype=tf.int32
)
def parse_init_epoch(self):
"""Parse initial epoch."""
if self.load_type == 'resume_model_path':
try:
epoch = int(self.training_config.resume_model_path.split('.')[-2].split('_')[-1])
except Exception:
raise ValueError("Cannot parse the checkpoint path. Did you rename it?")
else:
epoch = 0
self.init_epoch = epoch
@property
def train_labels_format(self):
"""The format of the labels of training set."""
if self.spec.dataset_config.data_sources[0].WhichOneof("labels_format") == \
"tfrecords_path":
return "tfrecords"
return "keras_sequence"
    @property
    def val_labels_format(self):
        """The format of the labels of validation set."""
        # Delegates to the module-level helper imported from spec_loader.
        return validation_labels_format(self.spec)
    def build_regularizer(self):
        """build regularizer."""
        # Build the kernel regularizer from the training config and cache it;
        # used when constructing / rebuilding the keras model.
        self.regularizer = build_regularizer_from_config(
            self.training_config.regularizer
        )
    def build_optimizer(self, hvd):
        """build optimizer.

        Args:
            hvd: horovod module; the optimizer is wrapped with
                hvd.DistributedOptimizer inside set_optimizer.
        """
        optim = build_optimizer_from_config(
            self.training_config.optimizer
        )
        # set_optimizer is a no-op if an optimizer was already installed.
        self.set_optimizer(optim, hvd)
def eval_str(self, s):
"""If s is a string, return the eval results. Else return itself."""
if isinstance(s, six.string_types):
if len(s) > 0:
return eval(s)
return None
return s
@property
def big_anchor_shape(self):
"""big anchor shape."""
big_anchor = self.eval_str(self.yolov3_config.big_anchor_shape)
assert len(big_anchor) > 0, "big_anchor_shape in spec cannot be empty"
return big_anchor
@property
def mid_anchor_shape(self):
"""middle anchor shape."""
mid_anchor = self.eval_str(self.yolov3_config.mid_anchor_shape)
assert len(mid_anchor) > 0, "mid_anchor_shape in spec cannot be empty"
return mid_anchor
@property
def small_anchor_shape(self):
"""small anchor shape."""
small_anchor = self.eval_str(self.yolov3_config.small_anchor_shape)
assert len(small_anchor) > 0, "small_anchor_shape in spec cannot be empty"
return small_anchor
def anchor_to_relative(self, x):
"""convert absolute anchors to relative anchors."""
return (np.array(x, dtype=np.float).reshape(-1, 2) / np.array(
[self.image_width, self.image_height]).reshape(1, 2)).tolist()
    @property
    def all_anchors(self):
        """all absolute anchors."""
        # Ordered big -> mid -> small to match the three YOLO output scales.
        return [self.big_anchor_shape, self.mid_anchor_shape, self.small_anchor_shape]
    @property
    def all_relative_anchors(self):
        """all relative anchors."""
        # Same big -> mid -> small order, normalized by the input resolution.
        return [self.anchor_to_relative(x) for x in self.all_anchors]
    def build_keras_model(self, input_image=None, input_shape=None, val=False):
        """build a keras model from scratch.

        Args:
            input_image: optional existing tensor to build the model on.
            input_shape: optional (C, H, W) shape; defaults to the configured
                channel count with free spatial dims (multi-scale input).
            val (bool): if True, return the raw YOLO model directly instead of
                storing a renamed training model on ``self``.

        Returns:
            The YOLO model when ``val`` is True; otherwise None (the training
            model is stored in ``self.keras_model``).
        """
        model_input = Input(
            shape=input_shape or (self.image_channels, None, None),
            tensor=input_image,
            name="Input"
        )
        yolo_model = YOLO(
            model_input,
            self.arch,
            self.nlayers,
            num_classes=self.n_classes,
            kernel_regularizer=self.regularizer,
            anchors=self.all_relative_anchors,
            freeze_blocks=self.freeze_blocks,
            freeze_bn=self.freeze_bn,
            arch_conv_blocks=self.arch_conv_blocks,
            qat=self.qat,
            force_relu=self.force_relu
        )
        if val:
            # if it is a validation model, return it directly
            return yolo_model
        # rename it
        self.keras_model = Model(
            inputs=model_input,
            outputs=yolo_model.outputs,
            name='yolo_' + self.arch
        )
        self.inputs = self.keras_model.inputs
        self.outputs = self.keras_model.outputs
        return None
    def load_pretrained_model(self, model_path):
        """load pretrained model's weights.

        Args:
            model_path (str): path to a pretrained model file; matching layer
                weights are copied into ``self.keras_model`` by layer name.
        """
        pretrained_model = model_io.load_model(
            model_path,
            self.spec,
            key=self.key
        )
        _load_pretrain_weights(pretrained_model, self.keras_model)
def override_regularizer(self, train_model):
"""override regularizer."""
model_config = train_model.get_config()
for layer, layer_config in zip(train_model.layers, model_config['layers']):
if hasattr(layer, 'kernel_regularizer'):
layer_config['config']['kernel_regularizer'] = self.regularizer
reg_model = Model.from_config(
model_config,
custom_objects=CUSTOM_OBJS
)
reg_model.set_weights(train_model.get_weights())
return reg_model
    def apply_model_to_new_inputs(self, model, tensor, input_shape):
        """Apply model to new inputs.

        Rebuilds ``model`` on a fresh input layer (optionally bound to an
        existing tensor) by round-tripping the model through a temporary
        file on disk.

        Args:
            model: Keras model to re-instantiate.
            tensor: optional input tensor to attach to the new input layer.
            input_shape: model input shape (without the batch dimension).

        Returns:
            A new Keras model wired to the new input layer.
        """
        input_layer = keras.layers.InputLayer(
            input_shape=input_shape,
            input_tensor=tensor,
            name="Input",
        )
        # mkstemp creates the file; remove it so model.save can write fresh.
        _, temp_model_path = tempfile.mkstemp()
        os.remove(temp_model_path)
        model.save(temp_model_path)
        # patch_freeze_bn keeps BatchNorm layers frozen (or not) on reload.
        with patch_freeze_bn(self.freeze_bn):
            new_model = get_model_with_input(temp_model_path, input_layer)
        os.remove(temp_model_path)
        return new_model
    def load_pruned_model(self, pruned_model_path, input_tensor, input_shape):
        """Load a pruned model and prepare it for retraining.

        The pruned model is loaded from disk, its regularizers are replaced
        with the ones from the current spec, and it is optionally rebuilt on
        a new input tensor.

        Args:
            pruned_model_path (str): path to the pruned model file.
            input_tensor: optional tensor to rebuild the model on; when
                None the pruned model is used as-is.
            input_shape: model input shape (without the batch dimension).
        """
        pruned_model = model_io.load_model(
            pruned_model_path,
            self.spec,
            key=self.key,
            input_shape=input_shape
        )
        # The checkpoint's regularizers may differ from the current spec.
        pruned_model = self.override_regularizer(
            pruned_model
        )
        if input_tensor is not None:
            self.keras_model = self.apply_model_to_new_inputs(
                pruned_model,
                input_tensor,
                input_shape
            )
        else:
            self.keras_model = pruned_model
        self.inputs = self.keras_model.inputs
        self.outputs = self.keras_model.outputs
def set_optimizer(self, opt, hvd):
'''setup optimizer.'''
if self.optimizer is not None:
return
self.optimizer = hvd.DistributedOptimizer(opt)
    def resume_model(self, checkpoint_path, input_tensor, input_shape, hvd):
        """Resume training from a checkpoint.

        Loads the checkpointed model together with its optimizer state,
        optionally rebuilds it on a new input tensor, and re-wraps the
        optimizer for Horovod.

        Args:
            checkpoint_path (str): path to the checkpoint file.
            input_tensor: optional tensor to rebuild the model on.
            input_shape: model input shape (without the batch dimension).
            hvd: horovod module, used to build the distributed optimizer.
        """
        resumed_model = model_io.load_model(
            checkpoint_path,
            self.spec,
            key=self.key,
            input_shape=input_shape
        )
        # Grab the optimizer before the rebuild below replaces the model
        # object (the rebuilt model would not carry it).
        optimizer = resumed_model.optimizer
        if input_tensor is not None:
            resumed_model = self.apply_model_to_new_inputs(
                resumed_model,
                input_tensor,
                input_shape
            )
        self.keras_model = resumed_model
        self.inputs = self.keras_model.inputs
        self.outputs = self.keras_model.outputs
        self.set_optimizer(optimizer, hvd)
def set_target_tensors(self, encoded_labels):
"""set target tensors."""
if self.target_tensors is not None:
return
self.target_tensors = [encoded_labels]
def build_losses(self):
"""build loss."""
if self.losses is not None:
return
yololoss = YOLOv3Loss(
self.spec.yolov3_config.loss_loc_weight,
self.spec.yolov3_config.loss_neg_obj_weights,
self.spec.yolov3_config.loss_class_weights,
self.spec.yolov3_config.matching_neutral_box_iou
)
self.losses = [yololoss.compute_loss]
def build_hvd_callbacks(self, hvd):
'''setup horovod callbacks.'''
self.callbacks.append(hvd.callbacks.BroadcastGlobalVariablesCallback(0))
self.callbacks.append(hvd.callbacks.MetricAverageCallback())
self.callbacks.append(TerminateOnNaN())
def build_lr_scheduler(self, train_dataset, hvd):
"""build LR scheduler."""
init_epoch = self.init_epoch
if type(train_dataset) == YOLOv3DataPipe:
total_num = train_dataset.num_samples
else:
total_num = train_dataset.n_samples
iters_per_epoch = int(ceil(total_num / self.bs / hvd.size()))
max_iterations = self.num_epochs * iters_per_epoch
lr_scheduler = LRS(
base_lr=self.lrconfig.max_learning_rate * hvd.size(),
min_lr_ratio=self.lrconfig.min_learning_rate / self.lrconfig.max_learning_rate,
soft_start=self.lrconfig.soft_start,
annealing_start=self.lrconfig.annealing,
max_iterations=max_iterations
)
init_step = init_epoch * iters_per_epoch
lr_scheduler.reset(init_step)
self.callbacks.append(lr_scheduler)
self.iters_per_epoch = iters_per_epoch
def build_checkpointer(self, ckpt_path, verbose):
"""build checkpointer."""
model_checkpointer = KerasModelSaver(
ckpt_path,
self.key,
self.ckpt_interval,
last_epoch=self.num_epochs,
verbose=verbose
)
self.callbacks.append(model_checkpointer)
def build_csvlogger(self, csv_path):
"""build CSV logger."""
csv_logger = CSVLogger(
filename=csv_path,
separator=',',
append=False
)
self.callbacks.append(csv_logger)
def build_training_model(self, hvd):
"""build the training model in various cases."""
if type(self.train_dataset) == YOLOv3DataPipe:
input_image = self.train_dataset.images
else:
input_image = None
if self.load_type == "resume_model_path":
self.resume_model(
self.training_config.resume_model_path,
input_image,
(self.image_channels, None, None),
hvd
)
elif self.load_type == "pruned_model_path":
self.load_pruned_model(
self.training_config.pruned_model_path,
input_image,
(self.image_channels, None, None)
)
else:
self.build_keras_model(
input_image
)
if self.training_config.pretrain_model_path:
self.load_pretrained_model(
self.training_config.pretrain_model_path
)
# get predictor sizes for later use
predictor_names = [
'conv_big_object',
'conv_mid_object',
'conv_sm_object'
]
predictor_layers = [
self.keras_model.get_layer(n) for n in predictor_names
]
self.predictor_sizes = [tf.shape(l.output)[2:4] for l in predictor_layers]
    def build_validation_model(self):
        """Build the validation model from the training model.

        The training model is round-tripped through a temporary file so it
        can be rebuilt on a fixed-size input layer, then wrapped with the
        NMS/decoding evaluation head.
        """
        # set eval phase at first
        assert self.keras_model is not None, (
            """Training model has to be built before validation model."""
        )
        set_learning_phase(0)
        input_shape = (self.image_channels, self.image_height, self.image_width)
        input_layer = keras.layers.InputLayer(
            input_shape=input_shape,
            input_tensor=None,
            name="Input",
        )
        # Round-trip via disk to rebuild the model on the fixed-size input;
        # mkstemp creates the file, so remove it before model.save writes it.
        _, temp_model_path = tempfile.mkstemp()
        os.remove(temp_model_path)
        self.keras_model.save(temp_model_path)
        with patch_freeze_bn(self.freeze_bn):
            val_model = get_model_with_input(temp_model_path, input_layer)
        os.remove(temp_model_path)
        self._val_model = val_model
        # setup validation model predictor sizes for later use
        predictor_names = [
            'conv_big_object',
            'conv_mid_object',
            'conv_sm_object'
        ]
        predictor_layers = [
            self._val_model.get_layer(n) for n in predictor_names
        ]
        self.val_predictor_sizes = [l.output_shape[2:] for l in predictor_layers]
        # Stride of each head's feature map relative to the input image,
        # as (height stride, width stride) pairs.
        self.val_fmap_stride = [
            (self.image_height // x[0], self.image_width // x[1]) for x in self.val_predictor_sizes
        ]
        # restore learning phase to 1
        set_learning_phase(1)
        # Attach the NMS/decoding head for evaluation.
        self.val_model = eval_builder.build(
            val_model,
            confidence_thresh=self.nms_confidence_thresh,
            iou_threshold=self.nms_iou_threshold,
            top_k=self.nms_top_k,
            include_encoded_head=True,
            nms_on_cpu=self.nms_on_cpu
        )
    def get_val_fmap_stride(self):
        """Build a dummy validation model to get val_fmap_stride.

        Each head's stride is the ratio between the input image size and
        that head's feature-map size.
        """
        # set eval phase at first
        set_learning_phase(0)
        # it doesn't matter whether the train model is pruned or not,
        # since we just care about the height/width of the predictor
        # feature maps. Channel number is irrelevant.
        val_model = self.build_keras_model(
            input_shape=(self.image_channels, self.image_height, self.image_width),
            val=True
        )
        # restore learning phase to 1
        set_learning_phase(1)
        # setup validation model predictor sizes for later use
        predictor_names = [
            'conv_big_object',
            'conv_mid_object',
            'conv_sm_object'
        ]
        predictor_layers = [
            val_model.get_layer(n) for n in predictor_names
        ]
        val_predictor_sizes = [l.output_shape[2:4] for l in predictor_layers]
        # (height stride, width stride) per detection head.
        fmap_stride = [
            (self.image_height // x[0], self.image_width // x[1]) for x in val_predictor_sizes
        ]
        self.val_fmap_stride = fmap_stride
def build_ap_evaluator(self):
"""build_ap_evaluator."""
self.ap_evaluator = APEvaluator(
self.n_classes,
conf_thres=self.nms_confidence_thresh,
matching_iou_threshold=self.matching_iou,
average_precision_mode=self.average_precision_mode
)
    def build_loss_ops(self):
        """Build TF ops used to compute validation loss outside of Keras.

        Creates placeholders shaped after the validation model's final
        output; the ground-truth placeholder has 6 fewer attributes per box
        than the prediction (see the ``n_attr - 6`` below) — presumably the
        decoded box/anchor columns appended for NMS; confirm against the
        eval builder.
        """
        n_box, n_attr = self._val_model.layers[-1].output_shape[1:]
        op_pred = tf.placeholder(tf.float32, shape=(None, n_box, n_attr))
        op_true = tf.placeholder(tf.float32, shape=(None, n_box, n_attr - 6))
        # [ground truth, prediction, loss tensor computed from the two]
        self.loss_ops = [op_true, op_pred, self.losses[0](op_true, op_pred)]
def build_validation_callback(
self,
val_dataset,
verbose=False
):
"""Build validation model."""
# build validation model
self.build_loss_ops()
self.build_ap_evaluator()
# build validation callback
if type(val_dataset) == YOLOv3DataPipe:
eval_callback = YOLOv3MetricCallback(
ap_evaluator=self.ap_evaluator,
built_eval_model=self.val_model,
generator=val_dataset.generator(),
classes=self.classes,
n_batches=val_dataset.n_batches,
loss_ops=self.loss_ops,
eval_model=self._val_model,
metric_interval=self.ckpt_interval,
last_epoch=self.num_epochs,
verbose=verbose
)
else:
eval_callback = DetectionMetricCallback(
ap_evaluator=self.ap_evaluator,
built_eval_model=self.val_model,
eval_sequence=val_dataset,
loss_ops=self.loss_ops,
eval_model=self._val_model,
metric_interval=self.ckpt_interval,
last_epoch=self.num_epochs,
verbose=verbose
)
return self.callbacks.append(eval_callback)
def build_savers(self, results_dir, verbose):
"""build several savers."""
if not os.path.exists(os.path.join(results_dir, 'weights')):
os.mkdir(os.path.join(results_dir, 'weights'))
ckpt_path = str(os.path.join(
results_dir,
'weights',
'yolov3_' + self.arch_name + '_epoch_{epoch:03d}.hdf5'
)
)
# checkpointer
self.build_checkpointer(ckpt_path, verbose)
# output label file
with open(os.path.join(results_dir, 'model_output_labels.txt'), 'w') as f:
f.write('\n'.join(self.classes))
csv_path = os.path.join(results_dir, 'yolov3_training_log_' + self.arch_name + '.csv')
# CSV logger
self.build_csvlogger(csv_path)
def build_tensorboard_callback(self, output_dir):
"""Build TensorBoard callback for visualization."""
tb_path = os.path.join(
output_dir,
"logs"
)
if os.path.exists(tb_path) and os.path.isdir(tb_path):
shutil.rmtree(tb_path)
if not os.path.exists(tb_path):
os.makedirs(tb_path)
tb_callback = TensorBoard(
log_dir=tb_path,
write_graph=False,
weight_hist=False
)
self.tb_callback = tb_callback
self.callbacks.append(tb_callback)
def build_status_logging_callback(self, results_dir, num_epochs, is_master):
"""Build status logging for TAO API."""
status_logger = TAOStatusLogger(
results_dir,
append=True,
num_epochs=num_epochs,
is_master=is_master,
)
self.callbacks.append(status_logger)
def compile(self):
'''compile the keras model.'''
self.keras_model.compile(
optimizer=self.optimizer,
loss=self.losses,
target_tensors=self.target_tensors
)
def summary(self):
"""print keras model summary."""
self.keras_model.summary()
def train(self, verbose=1):
"""training."""
if type(self.train_dataset) == YOLOv3DataPipe:
self.keras_model.fit(
epochs=self.num_epochs,
steps_per_epoch=self.iters_per_epoch,
callbacks=self.callbacks,
initial_epoch=self.init_epoch,
verbose=verbose
)
else:
# Use the patched fit_generator
# TensorBoard image summary only supports 8-bit images
if (self.tb_callback is not None) and (self.image_depth == 8):
writer = self.tb_callback.writer
else:
writer = None
default_img_mean = (103.939, 116.779, 123.68)
fit_generator(
self.keras_model,
writer,
img_means=self.augmentation_config.image_mean or default_img_mean,
max_image_num=self.spec.training_config.visualizer.num_images,
steps_per_epoch=self.iters_per_epoch,
generator=self.train_dataset,
epochs=self.num_epochs,
callbacks=self.callbacks,
initial_epoch=self.init_epoch,
workers=self.n_workers,
max_queue_size=self.max_queue_size,
verbose=verbose,
use_multiprocessing=self.use_mp,
shuffle=False
)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/models/yolov3_model.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/models/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA YOLO model construction wrapper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import UpSampling2D
from nvidia_tao_tf1.core.templates.utils import _leaky_conv
from nvidia_tao_tf1.cv.common.models.backbones import get_backbone
def get_base_model(input_tensor,
                   arch,
                   nlayers,
                   kernel_regularizer=None,
                   bias_regularizer=None,
                   freeze_blocks=None,
                   freeze_bn=None,
                   force_relu=False):
    '''Return feature maps for YOLOv3.

    Args:
        input_tensor: image tensor
        arch: feature extractor arch
        nlayers: arch layers
        kernel_regularizer: kernel_regularizer
        bias_regularizer: bias_regularizer
        freeze_blocks: freeze_blocks
        freeze_bn: freeze_bn
        force_relu: Replace LeakyReLU with ReLU.

    Returns:
        the return is two tuples. First one is three tensors for three feature layers. second one is
        two integer tuple, corresponding to upsample0 and upsample1 num_filters.

    Raises:
        ValueError: when `arch` (or the `arch`/`nlayers` combination) is not
            supported.
    '''
    base_model = get_backbone(input_tensor,
                              arch,
                              data_format='channels_first',
                              kernel_regularizer=kernel_regularizer,
                              bias_regularizer=bias_regularizer,
                              freeze_blocks=freeze_blocks,
                              freeze_bn=freeze_bn,
                              nlayers=nlayers,
                              use_batch_norm=True,
                              use_pooling=False,
                              use_bias=False,
                              all_projections=True,
                              dropout=1e-3,
                              force_relu=force_relu)

    def additional_conv(nchannels):
        # Extra stride-2 conv appended after the backbone to produce the
        # smallest (big-object) feature map for archs that need it.
        return _leaky_conv(base_model.layers[-1].output,
                           nchannels, alpha=0.1, kernel=3, strides=2,
                           kernel_regularizer=kernel_regularizer,
                           bias_regularizer=bias_regularizer,
                           use_batch_norm=True, force_relu=force_relu,
                           name='yolo_expand_conv1')

    # NOTE: the negative layer indices below are tied to the exact layer
    # counts of the backbones built by get_backbone; do not change them
    # without re-checking the backbone topology.
    if arch == 'resnet':
        if nlayers == 10:
            fmaps = (additional_conv(512),
                     base_model.layers[-10].output, base_model.layers[-19].output)
            map_size = (128, 64)
        elif nlayers == 18:
            fmaps = (additional_conv(512),
                     base_model.layers[-19].output, base_model.layers[-37].output)
            map_size = (128, 64)
        elif nlayers == 34:
            fmaps = (additional_conv(512),
                     base_model.layers[-28].output, base_model.layers[-82].output)
            map_size = (128, 64)
        elif nlayers == 50:
            # Extract layers[-43] as the feature layer for large feature map (to detect sm object)
            lg_map = base_model.layers[-43].output
            lg_map = UpSampling2D(2, data_format='channels_first', name='expand_upsample')(lg_map)
            fmaps = (additional_conv(1024), base_model.layers[-7].output, lg_map)
            map_size = (256, 128)
        elif nlayers == 101:
            # Extract layers[-43] as the feature layer for large feature map (to detect sm object)
            # there's too many stride 16 layers. We take one and upsample it to stride 8.
            lg_map = base_model.layers[-43].output
            lg_map = UpSampling2D(2, data_format='channels_first', name='expand_upsample')(lg_map)
            fmaps = (additional_conv(1024), base_model.layers[-7].output, lg_map)
            map_size = (256, 128)
        else:
            raise ValueError("ResNet-{} architecture is currently not implemented\n"
                             "Please choose out of the following:\n{}.".
                             format(nlayers, '10, 18, 34, 50, 101'))
    elif arch == 'vgg':
        if nlayers == 16:
            fmaps = (base_model.layers[-1].output, base_model.layers[-10].output,
                     base_model.layers[-19].output)
            map_size = (256, 128)
        elif nlayers == 19:
            fmaps = (base_model.layers[-1].output, base_model.layers[-13].output,
                     base_model.layers[-25].output)
            map_size = (256, 128)
        else:
            # Fixed: this branch previously reported "ResNet-{}".
            raise ValueError("VGG-{} architecture is currently not implemented\n"
                             "Please choose out of the following:\n{}.".
                             format(nlayers, '16, 19'))
    elif arch == 'efficientnet_b0':
        lg_map = base_model.get_layer('block5a_expand_activation').output
        lg_map = UpSampling2D(2, data_format='channels_first', name='expand_upsample')(lg_map)
        z = _leaky_conv(lg_map, 256, alpha=0.1, kernel=3,
                        strides=1, kernel_regularizer=kernel_regularizer,
                        bias_regularizer=bias_regularizer, use_batch_norm=True,
                        force_relu=force_relu, name='expand_conv3')
        fmaps = (base_model.layers[-1].output,
                 base_model.get_layer('block6a_expand_activation').output,
                 z)
        map_size = (256, 128)
    elif arch == 'darknet':
        if nlayers == 19:
            fmaps = (base_model.layers[-1].output, base_model.layers[-17].output,
                     base_model.layers[-33].output)
            map_size = (256, 128)
        elif nlayers == 53:
            fmaps = (base_model.layers[-1].output, base_model.layers[-32].output,
                     base_model.layers[-91].output)
            map_size = (256, 128)
        else:
            raise ValueError("DarkNet-{} architecture is currently not implemented\n"
                             "Please choose out of the following:\n{}.".
                             format(nlayers, '19, 53'))
    elif arch == 'mobilenet_v1':
        fmaps = (additional_conv(512), base_model.layers[-39].output, base_model.layers[-53].output)
        map_size = (128, 64)
    elif arch == 'mobilenet_v2':
        fmaps = (additional_conv(96), base_model.layers[-32].output, base_model.layers[-74].output)
        map_size = (32, 16)
    elif arch == 'squeezenet':
        # Quite a bit work here...
        x = additional_conv(128)
        y = _leaky_conv(base_model.layers[-1].output, 128, alpha=0.1, kernel=1,
                        strides=1, kernel_regularizer=kernel_regularizer,
                        bias_regularizer=bias_regularizer, use_batch_norm=True,
                        force_relu=force_relu, name='expand_conv2')
        z = _leaky_conv(base_model.layers[-9].output, 128, alpha=0.1, kernel=1,
                        strides=1, kernel_regularizer=kernel_regularizer,
                        bias_regularizer=bias_regularizer, use_batch_norm=True,
                        force_relu=force_relu, name='expand_conv3')
        fmaps = (x, y, z)
        map_size = (64, 64)
    elif arch == 'googlenet':
        # Quite a bit work here...
        x = additional_conv(1024)
        y = _leaky_conv(base_model.layers[-21].output, 512, alpha=0.1, kernel=3,
                        strides=1, kernel_regularizer=kernel_regularizer,
                        bias_regularizer=bias_regularizer, use_batch_norm=True,
                        force_relu=force_relu, name='expand_conv2')
        lg_map = base_model.layers[-41].output
        lg_map = UpSampling2D(2, data_format='channels_first', name='expand_upsample')(lg_map)
        z = _leaky_conv(lg_map, 256, alpha=0.1, kernel=3,
                        strides=1, kernel_regularizer=kernel_regularizer,
                        bias_regularizer=bias_regularizer, use_batch_norm=True,
                        force_relu=force_relu, name='expand_conv3')
        fmaps = (x, y, z)
        map_size = (256, 128)
    else:
        raise ValueError("{} architecture is currently not implemented\n".
                         format(arch))
    return fmaps, map_size
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/models/base_model.py |
"""Utils to build the model, data loader and entire pipeline."""
from nvidia_tao_tf1.cv.common.mlops.clearml import get_clearml_task
from nvidia_tao_tf1.cv.common.mlops.wandb import check_wandb_logged_in, initialize_wandb
from nvidia_tao_tf1.cv.common.visualizer.tensorboard_visualizer import TensorBoardVisualizer
from nvidia_tao_tf1.cv.yolo_v3.data_loader.data_loader import YOLOv3DataPipe
from nvidia_tao_tf1.cv.yolo_v3.dataio.data_sequence import YOLOv3DataSequence
from nvidia_tao_tf1.cv.yolo_v3.dataio.input_encoder import (
YOLOv3InputEncoder,
YOLOv3InputEncoderTensor
)
from nvidia_tao_tf1.cv.yolo_v3.models.yolov3_model import YOLOv3Model
def build_training_pipeline(spec, results_dir, key, hvd, sess, verbose):
    """Build the training pipeline.

    Wires together the visualizer, optional ClearML/W&B logging, the
    YOLOv3 model, the training and validation datasets/encoders and all
    training callbacks, then compiles the model.

    Args:
        spec: experiment spec proto.
        results_dir (str): output directory for checkpoints and logs.
        key (str): encryption key for model loading/saving.
        hvd: horovod module.
        sess: TF session used by the tfrecord validation pipeline.
        verbose: verbosity flag forwarded to the validation callback.

    Returns:
        The compiled YOLOv3Model, ready for training.
    """
    # Define visualizer
    visualizer = TensorBoardVisualizer()
    visualizer.build_from_config(
        spec.training_config.visualizer
    )
    visualizer_config = spec.training_config.visualizer
    is_master = hvd.rank() == 0
    # Experiment-tracking integrations only run on the master rank.
    if is_master and visualizer_config.HasField("clearml_config"):
        clearml_config = visualizer_config.clearml_config
        get_clearml_task(clearml_config, "yolo_v3")
    if is_master and visualizer_config.HasField("wandb_config"):
        wandb_config = visualizer_config.wandb_config
        wandb_logged_in = check_wandb_logged_in()
        wandb_name = f"{wandb_config.name}" if wandb_config.name else \
            "yolov3_training"
        initialize_wandb(
            project=wandb_config.project if wandb_config.project else None,
            entity=wandb_config.entity if wandb_config.entity else None,
            notes=wandb_config.notes if wandb_config.notes else None,
            tags=wandb_config.tags if wandb_config.tags else None,
            sync_tensorboard=True,
            save_code=False,
            results_dir=results_dir,
            wandb_logged_in=wandb_logged_in,
            name=wandb_name
        )
    # instantiate the model
    yolov3 = YOLOv3Model(
        spec,
        key
    )
    train_encoder = YOLOv3InputEncoder(
        yolov3.n_classes,
        yolov3.val_fmap_stride,
        yolov3.all_relative_anchors
    )

    def eval_encode_fn(output_img_size, gt_label):
        # Validation needs both the encoded label and the raw ground truth.
        return (train_encoder(output_img_size, gt_label), gt_label)
    if yolov3.train_labels_format == "tfrecords":
        # tfrecord data loader
        train_dataset = YOLOv3DataPipe(
            spec,
            label_encoder=None,
            training=True,
            h_tensor=yolov3.h_tensor,
            w_tensor=yolov3.w_tensor,
            visualizer=visualizer,
            rank=hvd.rank()
        )
        yolov3.train_dataset = train_dataset
        # build the training model
        yolov3.build_training_model(hvd)
        # setup target tensors
        yolo_input_encoder = \
            YOLOv3InputEncoderTensor(
                img_height=yolov3.h_tensor,
                img_width=yolov3.w_tensor,
                n_classes=yolov3.n_classes,
                feature_map_size=yolov3.predictor_sizes,
                anchors=yolov3.all_relative_anchors
            )
        train_dataset.set_encoder(yolo_input_encoder)
        yolov3.set_target_tensors(train_dataset.encoded_labels)
    else:
        # keras sequence data loader
        train_sequence = YOLOv3DataSequence(
            spec.dataset_config,
            spec.augmentation_config,
            spec.training_config.batch_size_per_gpu,
            is_training=True,
            encode_fn=train_encoder,
            output_raw_label=spec.training_config.visualizer.enabled
        )
        yolov3.train_dataset = train_sequence
        # build the training model
        yolov3.build_training_model(hvd)
    # Visualize model weights histogram
    if hvd.rank() == 0 and spec.training_config.visualizer.enabled:
        visualizer.keras_model_weight_histogram(yolov3.keras_model)
    # setup optimizer, if any
    yolov3.build_optimizer(hvd)
    # buld loss functions
    yolov3.build_losses()
    # build callbacks
    yolov3.build_hvd_callbacks(hvd)
    # build learning rate scheduler
    yolov3.build_lr_scheduler(yolov3.train_dataset, hvd)
    # build validation callback
    if yolov3.val_labels_format == "tfrecords":
        val_dataset = YOLOv3DataPipe(
            spec,
            training=False,
            sess=sess,
            h_tensor=yolov3.h_tensor_val,
            w_tensor=yolov3.w_tensor_val
        )
        yolov3.val_dataset = val_dataset
        yolov3.build_validation_model()
        val_input_encoder = \
            YOLOv3InputEncoderTensor(
                img_height=yolov3.h_tensor_val,
                img_width=yolov3.w_tensor_val,
                n_classes=yolov3.n_classes,
                feature_map_size=yolov3.val_predictor_sizes,
                anchors=yolov3.all_relative_anchors
            )
        val_dataset.set_encoder(val_input_encoder)
        yolov3.build_validation_callback(
            val_dataset,
            verbose=verbose
        )
    else:
        yolov3.build_validation_model()
        eval_sequence = YOLOv3DataSequence(
            spec.dataset_config,
            spec.augmentation_config,
            spec.eval_config.batch_size,
            is_training=False,
            encode_fn=eval_encode_fn
        )
        yolov3.val_dataset = eval_sequence
        yolov3.build_validation_callback(
            eval_sequence,
            verbose=verbose
        )
    # build checkpointer; savers/loggers only run on the master rank.
    if hvd.rank() == 0:
        yolov3.build_savers(results_dir, verbose)
        if spec.training_config.visualizer.enabled:
            yolov3.build_tensorboard_callback(results_dir)
        yolov3.build_status_logging_callback(results_dir, yolov3.num_epochs, True)
    yolov3.compile()
    return yolov3
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/models/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test yolo models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras import backend as K
from keras.layers import Input
from keras.models import Model
import numpy as np
import pytest
import tensorflow as tf
from nvidia_tao_tf1.cv.yolo_v3.models.base_model import get_base_model
# Limit keras to using only 1 gpu of gpu id.
gpu_id = str(0)

# Restricting the number of GPU's to be used.
config = tf.ConfigProto()
# Grow GPU memory as needed instead of reserving it all up front.
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = gpu_id
K.set_session(tf.Session(config=config))
def do_model_pred(input_shape, arch, nlayers, batch_size=2):
    """Build a backbone and check the shapes of its three feature maps.

    Args:
        input_shape: CHW input shape. Assumes a square spatial size —
            the asserts use input_shape[-1] for both height and width.
        arch (str): backbone architecture name.
        nlayers (int): number of layers for the architecture.
        batch_size (int): batch size for the random test input.
    """
    x = Input(shape=input_shape)
    fmaps, map_size = get_base_model(x, arch, nlayers)
    model = Model(inputs=x, outputs=fmaps)
    x_in = np.random.normal(size=(batch_size, ) + input_shape)
    pred = model.predict(x_in)
    stride = 32
    assert len(map_size) == 2
    assert len(pred) == 3
    # VGG backbones in this setup end at stride 16 rather than 32.
    if arch == 'vgg':
        stride = 16
    # assert pred 0
    assert pred[0].shape[0] == batch_size
    assert pred[0].shape[2] == input_shape[-1] / stride
    assert pred[0].shape[3] == input_shape[-1] / stride
    # assert pred 1
    assert pred[1].shape[0] == batch_size
    assert pred[1].shape[2] == input_shape[-1] / (stride / 2)
    assert pred[1].shape[3] == input_shape[-1] / (stride / 2)
    # assert pred 2
    assert pred[2].shape[0] == batch_size
    assert pred[2].shape[2] == input_shape[-1] / (stride / 4)
    assert pred[2].shape[3] == input_shape[-1] / (stride / 4)
def test_all_base_models():
    """Smoke-test supported backbones and verify unsupported ones raise."""
    # These archs ignore nlayers; testing them once (nlayers == 10) is enough.
    single_depth_archs = ['squeezenet', 'googlenet', 'mobilenet_v1',
                          'efficientnet_b0']
    # Archs with a restricted set of valid depths.
    valid_depths = {
        'resnet': [10, 18, 34, 50, 101],
        'vgg': [16, 19],
        'darknet': [19, 53],
    }
    for arch in ['resnet', 'vgg', 'squeezenet', 'darknet', 'mobilenet_v1',
                 'mobilenet_v2', 'googlenet', 'efficientnet_b0', 'wrong_net']:
        for nlayers in [10, 16, 18, 19, 34, 50, 53, 101]:
            if arch in single_depth_archs and nlayers > 10:
                # Use mobilenet_v2 to test nlayers invariance instead.
                continue
            bad_depth = (arch in valid_depths and
                         nlayers not in valid_depths[arch])
            if bad_depth or arch == 'wrong_net':
                with pytest.raises((ValueError, NotImplementedError)):
                    do_model_pred((3, 64, 64), arch, nlayers)
            else:
                do_model_pred((3, 64, 64), arch, nlayers)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/models/tests/test_model.py |
tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/yolo_v3/architecture/__init__.py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.