python_code
stringlengths 0
679k
| repo_name
stringlengths 9
41
| file_path
stringlengths 6
149
|
---|---|---|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/detectnet_v2/proto/inferencer_config.proto
import sys
# Py2/Py3 shim: the serialized descriptor literals below must be bytes.
# On Py3 the latin1 str literals are re-encoded; on Py2 they pass through.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
# Default symbol database; every descriptor and message class in this
# module is registered into it further below.
_sym_db = _symbol_database.Default()
# File-level descriptor for inferencer_config.proto (proto3, default/empty
# package). serialized_pb is the protoc-emitted FileDescriptorProto blob;
# it encodes the whole schema and must not be edited by hand.
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_deploy/cv/detectnet_v2/proto/inferencer_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n?nvidia_tao_deploy/cv/detectnet_v2/proto/inferencer_config.proto\"`\n\x10\x43\x61libratorConfig\x12\x19\n\x11\x63\x61libration_cache\x18\x01 \x01(\t\x12\x1e\n\x16\x63\x61libration_tensorfile\x18\x02 \x01(\t\x12\x11\n\tn_batches\x18\x03 \x01(\x05\"\x1a\n\tTLTConfig\x12\r\n\x05model\x18\x01 \x01(\t\"\xf1\x02\n\x0eTensorRTConfig\x12&\n\x06parser\x18\x01 \x01(\x0e\x32\x16.TensorRTConfig.Parser\x12\x12\n\ncaffemodel\x18\x02 \x01(\t\x12\x10\n\x08prototxt\x18\x03 \x01(\t\x12\x11\n\tuff_model\x18\x04 \x01(\t\x12\x12\n\netlt_model\x18\x05 \x01(\t\x12:\n\x11\x62\x61\x63kend_data_type\x18\x06 \x01(\x0e\x32\x1f.TensorRTConfig.BackendDataType\x12\x13\n\x0bsave_engine\x18\x07 \x01(\x08\x12\x12\n\ntrt_engine\x18\x08 \x01(\t\x12,\n\x11\x63\x61librator_config\x18\t \x01(\x0b\x32\x11.CalibratorConfig\"&\n\x06Parser\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x00\x12\x07\n\x03UFF\x10\x01\x12\x08\n\x04\x45TLT\x10\x02\"/\n\x0f\x42\x61\x63kendDataType\x12\x08\n\x04\x46P32\x10\x00\x12\x08\n\x04\x46P16\x10\x01\x12\x08\n\x04INT8\x10\x02\"\xb2\x02\n\x10InferencerConfig\x12 \n\ntlt_config\x18\x01 \x01(\x0b\x32\n.TLTConfigH\x00\x12*\n\x0ftensorrt_config\x18\x02 \x01(\x0b\x32\x0f.TensorRTConfigH\x00\x12\x13\n\x0binput_nodes\x18\x03 \x03(\t\x12\x14\n\x0coutput_nodes\x18\x04 \x03(\t\x12\x12\n\nbatch_size\x18\x05 \x01(\x05\x12\x14\n\x0cimage_height\x18\x06 \x01(\x05\x12\x13\n\x0bimage_width\x18\x07 \x01(\x05\x12\x16\n\x0eimage_channels\x18\x08 \x01(\x05\x12\x11\n\tgpu_index\x18\t \x01(\x05\x12\x16\n\x0etarget_classes\x18\n \x03(\t\x12\x0e\n\x06stride\x18\x0b \x01(\x05\x42\x13\n\x11model_config_typeb\x06proto3')
)
# Enum descriptor for TensorRTConfig.Parser: CAFFE=0, UFF=1, ETLT=2.
# serialized_start/end are byte offsets into DESCRIPTOR.serialized_pb.
_TENSORRTCONFIG_PARSER = _descriptor.EnumDescriptor(
name='Parser',
full_name='TensorRTConfig.Parser',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='CAFFE', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UFF', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ETLT', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=476,
serialized_end=514,
)
_sym_db.RegisterEnumDescriptor(_TENSORRTCONFIG_PARSER)
# Enum descriptor for TensorRTConfig.BackendDataType: FP32=0, FP16=1, INT8=2.
_TENSORRTCONFIG_BACKENDDATATYPE = _descriptor.EnumDescriptor(
name='BackendDataType',
full_name='TensorRTConfig.BackendDataType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='FP32', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FP16', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INT8', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=516,
serialized_end=563,
)
_sym_db.RegisterEnumDescriptor(_TENSORRTCONFIG_BACKENDDATATYPE)
# Message descriptor for CalibratorConfig: INT8-calibration inputs
# (calibration_cache: string, calibration_tensorfile: string,
# n_batches: int32).
_CALIBRATORCONFIG = _descriptor.Descriptor(
name='CalibratorConfig',
full_name='CalibratorConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='calibration_cache', full_name='CalibratorConfig.calibration_cache', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='calibration_tensorfile', full_name='CalibratorConfig.calibration_tensorfile', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='n_batches', full_name='CalibratorConfig.n_batches', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=67,
serialized_end=163,
)
# Message descriptor for TLTConfig: a single string field `model`
# (path to the TLT model file).
_TLTCONFIG = _descriptor.Descriptor(
name='TLTConfig',
full_name='TLTConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='model', full_name='TLTConfig.model', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=165,
serialized_end=191,
)
# Message descriptor for TensorRTConfig: TensorRT backend settings
# (parser enum, model file paths, backend data type enum, engine
# save/load options, and a nested CalibratorConfig reference).
# Field enum/message types are linked after all descriptors are defined.
_TENSORRTCONFIG = _descriptor.Descriptor(
name='TensorRTConfig',
full_name='TensorRTConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='parser', full_name='TensorRTConfig.parser', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='caffemodel', full_name='TensorRTConfig.caffemodel', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='prototxt', full_name='TensorRTConfig.prototxt', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='uff_model', full_name='TensorRTConfig.uff_model', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='etlt_model', full_name='TensorRTConfig.etlt_model', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='backend_data_type', full_name='TensorRTConfig.backend_data_type', index=5,
number=6, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='save_engine', full_name='TensorRTConfig.save_engine', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='trt_engine', full_name='TensorRTConfig.trt_engine', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='calibrator_config', full_name='TensorRTConfig.calibrator_config', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_TENSORRTCONFIG_PARSER,
_TENSORRTCONFIG_BACKENDDATATYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=194,
serialized_end=563,
)
# Message descriptor for InferencerConfig: top-level inference settings.
# tlt_config / tensorrt_config are members of the `model_config_type`
# oneof (exactly one backend may be set); remaining fields describe
# input/output node names, batch and image geometry, GPU index,
# target classes, and stride.
_INFERENCERCONFIG = _descriptor.Descriptor(
name='InferencerConfig',
full_name='InferencerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tlt_config', full_name='InferencerConfig.tlt_config', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tensorrt_config', full_name='InferencerConfig.tensorrt_config', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='input_nodes', full_name='InferencerConfig.input_nodes', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_nodes', full_name='InferencerConfig.output_nodes', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batch_size', full_name='InferencerConfig.batch_size', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_height', full_name='InferencerConfig.image_height', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_width', full_name='InferencerConfig.image_width', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_channels', full_name='InferencerConfig.image_channels', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gpu_index', full_name='InferencerConfig.gpu_index', index=8,
number=9, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='target_classes', full_name='InferencerConfig.target_classes', index=9,
number=10, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stride', full_name='InferencerConfig.stride', index=10,
number=11, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='model_config_type', full_name='InferencerConfig.model_config_type',
index=0, containing_type=None, fields=[]),
],
serialized_start=566,
serialized_end=872,
)
# Post-definition wiring (emitted by protoc after all descriptors exist):
# resolve enum/message field types, attach nested enums to their
# containing message, populate the oneof membership, and register the
# message types and file descriptor with the symbol database.
_TENSORRTCONFIG.fields_by_name['parser'].enum_type = _TENSORRTCONFIG_PARSER
_TENSORRTCONFIG.fields_by_name['backend_data_type'].enum_type = _TENSORRTCONFIG_BACKENDDATATYPE
_TENSORRTCONFIG.fields_by_name['calibrator_config'].message_type = _CALIBRATORCONFIG
_TENSORRTCONFIG_PARSER.containing_type = _TENSORRTCONFIG
_TENSORRTCONFIG_BACKENDDATATYPE.containing_type = _TENSORRTCONFIG
_INFERENCERCONFIG.fields_by_name['tlt_config'].message_type = _TLTCONFIG
_INFERENCERCONFIG.fields_by_name['tensorrt_config'].message_type = _TENSORRTCONFIG
# Both backend config fields belong to the `model_config_type` oneof.
_INFERENCERCONFIG.oneofs_by_name['model_config_type'].fields.append(
_INFERENCERCONFIG.fields_by_name['tlt_config'])
_INFERENCERCONFIG.fields_by_name['tlt_config'].containing_oneof = _INFERENCERCONFIG.oneofs_by_name['model_config_type']
_INFERENCERCONFIG.oneofs_by_name['model_config_type'].fields.append(
_INFERENCERCONFIG.fields_by_name['tensorrt_config'])
_INFERENCERCONFIG.fields_by_name['tensorrt_config'].containing_oneof = _INFERENCERCONFIG.oneofs_by_name['model_config_type']
DESCRIPTOR.message_types_by_name['CalibratorConfig'] = _CALIBRATORCONFIG
DESCRIPTOR.message_types_by_name['TLTConfig'] = _TLTCONFIG
DESCRIPTOR.message_types_by_name['TensorRTConfig'] = _TENSORRTCONFIG
DESCRIPTOR.message_types_by_name['InferencerConfig'] = _INFERENCERCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete message classes, generated via the reflection metaclass from
# the descriptors above, then registered so serialization can find them.
CalibratorConfig = _reflection.GeneratedProtocolMessageType('CalibratorConfig', (_message.Message,), dict(
DESCRIPTOR = _CALIBRATORCONFIG,
__module__ = 'nvidia_tao_deploy.cv.detectnet_v2.proto.inferencer_config_pb2'
# @@protoc_insertion_point(class_scope:CalibratorConfig)
))
_sym_db.RegisterMessage(CalibratorConfig)
TLTConfig = _reflection.GeneratedProtocolMessageType('TLTConfig', (_message.Message,), dict(
DESCRIPTOR = _TLTCONFIG,
__module__ = 'nvidia_tao_deploy.cv.detectnet_v2.proto.inferencer_config_pb2'
# @@protoc_insertion_point(class_scope:TLTConfig)
))
_sym_db.RegisterMessage(TLTConfig)
TensorRTConfig = _reflection.GeneratedProtocolMessageType('TensorRTConfig', (_message.Message,), dict(
DESCRIPTOR = _TENSORRTCONFIG,
__module__ = 'nvidia_tao_deploy.cv.detectnet_v2.proto.inferencer_config_pb2'
# @@protoc_insertion_point(class_scope:TensorRTConfig)
))
_sym_db.RegisterMessage(TensorRTConfig)
InferencerConfig = _reflection.GeneratedProtocolMessageType('InferencerConfig', (_message.Message,), dict(
DESCRIPTOR = _INFERENCERCONFIG,
__module__ = 'nvidia_tao_deploy.cv.detectnet_v2.proto.inferencer_config_pb2'
# @@protoc_insertion_point(class_scope:InferencerConfig)
))
_sym_db.RegisterMessage(InferencerConfig)
# @@protoc_insertion_point(module_scope)
| tao_deploy-main | nvidia_tao_deploy/cv/detectnet_v2/proto/inferencer_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/detectnet_v2/proto/cost_function_config.proto
import sys
# Py2/Py3 shim: the serialized descriptor literals below must be bytes.
# On Py3 the latin1 str literals are re-encoded; on Py2 they pass through.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
# Default symbol database; every descriptor and message class in this
# module is registered into it further below.
_sym_db = _symbol_database.Default()
# File-level descriptor for cost_function_config.proto (proto3, default
# package). serialized_pb is the protoc-emitted FileDescriptorProto blob;
# it encodes the whole schema and must not be edited by hand.
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_deploy/cv/detectnet_v2/proto/cost_function_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\nBnvidia_tao_deploy/cv/detectnet_v2/proto/cost_function_config.proto\"\x88\x03\n\x12\x43ostFunctionConfig\x12\x37\n\x0etarget_classes\x18\x01 \x03(\x0b\x32\x1f.CostFunctionConfig.TargetClass\x12\x1c\n\x14\x65nable_autoweighting\x18\x02 \x01(\x08\x12\x1c\n\x14max_objective_weight\x18\x03 \x01(\x02\x12\x1c\n\x14min_objective_weight\x18\x04 \x01(\x02\x1a\xde\x01\n\x0bTargetClass\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x63lass_weight\x18\x02 \x01(\x02\x12\"\n\x1a\x63overage_foreground_weight\x18\x03 \x01(\x02\x12=\n\nobjectives\x18\x04 \x03(\x0b\x32).CostFunctionConfig.TargetClass.Objective\x1aH\n\tObjective\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x16\n\x0einitial_weight\x18\x02 \x01(\x02\x12\x15\n\rweight_target\x18\x03 \x01(\x02\x62\x06proto3')
)
# Descriptor for the doubly-nested message
# CostFunctionConfig.TargetClass.Objective: a named objective with
# float initial_weight and weight_target.
_COSTFUNCTIONCONFIG_TARGETCLASS_OBJECTIVE = _descriptor.Descriptor(
name='Objective',
full_name='CostFunctionConfig.TargetClass.Objective',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='CostFunctionConfig.TargetClass.Objective.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='initial_weight', full_name='CostFunctionConfig.TargetClass.Objective.initial_weight', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='weight_target', full_name='CostFunctionConfig.TargetClass.Objective.weight_target', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=391,
serialized_end=463,
)
# Descriptor for nested message CostFunctionConfig.TargetClass:
# per-class cost settings (name, class_weight,
# coverage_foreground_weight, repeated Objective entries).
_COSTFUNCTIONCONFIG_TARGETCLASS = _descriptor.Descriptor(
name='TargetClass',
full_name='CostFunctionConfig.TargetClass',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='CostFunctionConfig.TargetClass.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='class_weight', full_name='CostFunctionConfig.TargetClass.class_weight', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='coverage_foreground_weight', full_name='CostFunctionConfig.TargetClass.coverage_foreground_weight', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='objectives', full_name='CostFunctionConfig.TargetClass.objectives', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_COSTFUNCTIONCONFIG_TARGETCLASS_OBJECTIVE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=241,
serialized_end=463,
)
# Descriptor for the top-level CostFunctionConfig message: repeated
# TargetClass entries plus autoweighting flag and min/max objective
# weight bounds.
_COSTFUNCTIONCONFIG = _descriptor.Descriptor(
name='CostFunctionConfig',
full_name='CostFunctionConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='target_classes', full_name='CostFunctionConfig.target_classes', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_autoweighting', full_name='CostFunctionConfig.enable_autoweighting', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_objective_weight', full_name='CostFunctionConfig.max_objective_weight', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_objective_weight', full_name='CostFunctionConfig.min_objective_weight', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_COSTFUNCTIONCONFIG_TARGETCLASS, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=71,
serialized_end=463,
)
# Post-definition wiring: attach nested messages to their containers,
# resolve message-typed fields, and register the file descriptor.
_COSTFUNCTIONCONFIG_TARGETCLASS_OBJECTIVE.containing_type = _COSTFUNCTIONCONFIG_TARGETCLASS
_COSTFUNCTIONCONFIG_TARGETCLASS.fields_by_name['objectives'].message_type = _COSTFUNCTIONCONFIG_TARGETCLASS_OBJECTIVE
_COSTFUNCTIONCONFIG_TARGETCLASS.containing_type = _COSTFUNCTIONCONFIG
_COSTFUNCTIONCONFIG.fields_by_name['target_classes'].message_type = _COSTFUNCTIONCONFIG_TARGETCLASS
DESCRIPTOR.message_types_by_name['CostFunctionConfig'] = _COSTFUNCTIONCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete message classes. Nested classes (TargetClass, Objective) are
# built inline inside their parent's class dict, mirroring the proto
# nesting; each level is then registered with the symbol database.
CostFunctionConfig = _reflection.GeneratedProtocolMessageType('CostFunctionConfig', (_message.Message,), dict(
TargetClass = _reflection.GeneratedProtocolMessageType('TargetClass', (_message.Message,), dict(
Objective = _reflection.GeneratedProtocolMessageType('Objective', (_message.Message,), dict(
DESCRIPTOR = _COSTFUNCTIONCONFIG_TARGETCLASS_OBJECTIVE,
__module__ = 'nvidia_tao_deploy.cv.detectnet_v2.proto.cost_function_config_pb2'
# @@protoc_insertion_point(class_scope:CostFunctionConfig.TargetClass.Objective)
))
,
DESCRIPTOR = _COSTFUNCTIONCONFIG_TARGETCLASS,
__module__ = 'nvidia_tao_deploy.cv.detectnet_v2.proto.cost_function_config_pb2'
# @@protoc_insertion_point(class_scope:CostFunctionConfig.TargetClass)
))
,
DESCRIPTOR = _COSTFUNCTIONCONFIG,
__module__ = 'nvidia_tao_deploy.cv.detectnet_v2.proto.cost_function_config_pb2'
# @@protoc_insertion_point(class_scope:CostFunctionConfig)
))
_sym_db.RegisterMessage(CostFunctionConfig)
_sym_db.RegisterMessage(CostFunctionConfig.TargetClass)
_sym_db.RegisterMessage(CostFunctionConfig.TargetClass.Objective)
# @@protoc_insertion_point(module_scope)
| tao_deploy-main | nvidia_tao_deploy/cv/detectnet_v2/proto/cost_function_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/detectnet_v2/proto/model_config.proto
import sys
# Py2/Py3 shim: the serialized descriptor literals below must be bytes.
# On Py3 the latin1 str literals are re-encoded; on Py2 they pass through.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
# Default symbol database; every descriptor and message class in this
# module is registered into it further below.
_sym_db = _symbol_database.Default()
# File-level descriptor for model_config.proto (proto3, default package).
# serialized_pb is the protoc-emitted FileDescriptorProto blob; it
# encodes the whole schema and must not be edited by hand.
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_deploy/cv/detectnet_v2/proto/model_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n:nvidia_tao_deploy/cv/detectnet_v2/proto/model_config.proto\"\xd9\x07\n\x0bModelConfig\x12\x1d\n\x15pretrained_model_file\x18\x01 \x01(\t\x12 \n\x18\x66reeze_pretrained_layers\x18\x02 \x01(\x08\x12\'\n\x1f\x61llow_loaded_model_modification\x18\x03 \x01(\x08\x12\x12\n\nnum_layers\x18\x04 \x01(\x05\x12\x13\n\x0buse_pooling\x18\x05 \x01(\x08\x12\x16\n\x0euse_batch_norm\x18\x06 \x01(\x08\x12\x14\n\x0c\x64ropout_rate\x18\x07 \x01(\x02\x12+\n\nactivation\x18\x08 \x01(\x0b\x32\x17.ModelConfig.Activation\x12\x30\n\robjective_set\x18\t \x01(\x0b\x32\x19.ModelConfig.ObjectiveSet\x12:\n\x12training_precision\x18\n \x01(\x0b\x32\x1e.ModelConfig.TrainingPrecision\x12\x11\n\tfreeze_bn\x18\x0b \x01(\x08\x12\x15\n\rfreeze_blocks\x18\x0c \x03(\x02\x12\x0c\n\x04\x61rch\x18\r \x01(\t\x12\x12\n\nload_graph\x18\x0e \x01(\x08\x12\x17\n\x0f\x61ll_projections\x18\x0f \x01(\x08\x1a\xb4\x01\n\nActivation\x12\x17\n\x0f\x61\x63tivation_type\x18\x01 \x01(\t\x12P\n\x15\x61\x63tivation_parameters\x18\x02 \x03(\x0b\x32\x31.ModelConfig.Activation.ActivationParametersEntry\x1a;\n\x19\x41\x63tivationParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x02:\x02\x38\x01\x1a=\n\rBboxObjective\x12\r\n\x05input\x18\x01 \x01(\t\x12\r\n\x05scale\x18\x02 \x01(\x02\x12\x0e\n\x06offset\x18\x03 \x01(\x02\x1a\x1d\n\x0c\x43ovObjective\x12\r\n\x05input\x18\x01 \x01(\t\x1a`\n\x0cObjectiveSet\x12(\n\x04\x62\x62ox\x18\x01 \x01(\x0b\x32\x1a.ModelConfig.BboxObjective\x12&\n\x03\x63ov\x18\x02 \x01(\x0b\x32\x19.ModelConfig.CovObjective\x1a\x91\x01\n\x11TrainingPrecision\x12\x44\n\x0e\x62\x61\x63kend_floatx\x18\x01 \x01(\x0e\x32,.ModelConfig.TrainingPrecision.BackendFloatx\"6\n\rBackendFloatx\x12\x0b\n\x07\x46LOAT32\x10\x00\x12\x0b\n\x07\x46LOAT16\x10\x01\x12\x0b\n\x07INVALID\x10\x02\x62\x06proto3')
)
# Enum descriptor for ModelConfig.TrainingPrecision.BackendFloatx:
# FLOAT32=0, FLOAT16=1, INVALID=2.
_MODELCONFIG_TRAININGPRECISION_BACKENDFLOATX = _descriptor.EnumDescriptor(
name='BackendFloatx',
full_name='ModelConfig.TrainingPrecision.BackendFloatx',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='FLOAT32', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FLOAT16', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=994,
serialized_end=1048,
)
_sym_db.RegisterEnumDescriptor(_MODELCONFIG_TRAININGPRECISION_BACKENDFLOATX)
# Descriptor for the synthesized map-entry message backing
# ModelConfig.Activation.activation_parameters (map<string, float>).
# serialized_options '8\001' marks it as a map entry.
_MODELCONFIG_ACTIVATION_ACTIVATIONPARAMETERSENTRY = _descriptor.Descriptor(
name='ActivationParametersEntry',
full_name='ModelConfig.Activation.ActivationParametersEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='ModelConfig.Activation.ActivationParametersEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='ModelConfig.Activation.ActivationParametersEntry.value', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=649,
serialized_end=708,
)
# Descriptor for nested message ModelConfig.Activation: an
# activation_type string plus a repeated map-entry field of
# per-activation float parameters.
_MODELCONFIG_ACTIVATION = _descriptor.Descriptor(
name='Activation',
full_name='ModelConfig.Activation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='activation_type', full_name='ModelConfig.Activation.activation_type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='activation_parameters', full_name='ModelConfig.Activation.activation_parameters', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_MODELCONFIG_ACTIVATION_ACTIVATIONPARAMETERSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=528,
serialized_end=708,
)
# Descriptor for nested message ModelConfig.BboxObjective:
# input (string) with float scale and offset.
_MODELCONFIG_BBOXOBJECTIVE = _descriptor.Descriptor(
name='BboxObjective',
full_name='ModelConfig.BboxObjective',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='input', full_name='ModelConfig.BboxObjective.input', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scale', full_name='ModelConfig.BboxObjective.scale', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='offset', full_name='ModelConfig.BboxObjective.offset', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=710,
serialized_end=771,
)
# Descriptor for nested message ModelConfig.CovObjective:
# a single string field `input`.
_MODELCONFIG_COVOBJECTIVE = _descriptor.Descriptor(
name='CovObjective',
full_name='ModelConfig.CovObjective',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='input', full_name='ModelConfig.CovObjective.input', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=773,
serialized_end=802,
)
# Descriptor for nested message ModelConfig.ObjectiveSet: pairs a
# BboxObjective (`bbox`) with a CovObjective (`cov`). The message_type
# links are resolved in the wiring section after all descriptors exist.
_MODELCONFIG_OBJECTIVESET = _descriptor.Descriptor(
name='ObjectiveSet',
full_name='ModelConfig.ObjectiveSet',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='bbox', full_name='ModelConfig.ObjectiveSet.bbox', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cov', full_name='ModelConfig.ObjectiveSet.cov', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=804,
serialized_end=900,
)
_MODELCONFIG_TRAININGPRECISION = _descriptor.Descriptor(
name='TrainingPrecision',
full_name='ModelConfig.TrainingPrecision',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='backend_floatx', full_name='ModelConfig.TrainingPrecision.backend_floatx', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_MODELCONFIG_TRAININGPRECISION_BACKENDFLOATX,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=903,
serialized_end=1048,
)
_MODELCONFIG = _descriptor.Descriptor(
name='ModelConfig',
full_name='ModelConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pretrained_model_file', full_name='ModelConfig.pretrained_model_file', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freeze_pretrained_layers', full_name='ModelConfig.freeze_pretrained_layers', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='allow_loaded_model_modification', full_name='ModelConfig.allow_loaded_model_modification', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_layers', full_name='ModelConfig.num_layers', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='use_pooling', full_name='ModelConfig.use_pooling', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='use_batch_norm', full_name='ModelConfig.use_batch_norm', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dropout_rate', full_name='ModelConfig.dropout_rate', index=6,
number=7, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='activation', full_name='ModelConfig.activation', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='objective_set', full_name='ModelConfig.objective_set', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='training_precision', full_name='ModelConfig.training_precision', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freeze_bn', full_name='ModelConfig.freeze_bn', index=10,
number=11, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freeze_blocks', full_name='ModelConfig.freeze_blocks', index=11,
number=12, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='arch', full_name='ModelConfig.arch', index=12,
number=13, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='load_graph', full_name='ModelConfig.load_graph', index=13,
number=14, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='all_projections', full_name='ModelConfig.all_projections', index=14,
number=15, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_MODELCONFIG_ACTIVATION, _MODELCONFIG_BBOXOBJECTIVE, _MODELCONFIG_COVOBJECTIVE, _MODELCONFIG_OBJECTIVESET, _MODELCONFIG_TRAININGPRECISION, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=63,
serialized_end=1048,
)
_MODELCONFIG_ACTIVATION_ACTIVATIONPARAMETERSENTRY.containing_type = _MODELCONFIG_ACTIVATION
_MODELCONFIG_ACTIVATION.fields_by_name['activation_parameters'].message_type = _MODELCONFIG_ACTIVATION_ACTIVATIONPARAMETERSENTRY
_MODELCONFIG_ACTIVATION.containing_type = _MODELCONFIG
_MODELCONFIG_BBOXOBJECTIVE.containing_type = _MODELCONFIG
_MODELCONFIG_COVOBJECTIVE.containing_type = _MODELCONFIG
_MODELCONFIG_OBJECTIVESET.fields_by_name['bbox'].message_type = _MODELCONFIG_BBOXOBJECTIVE
_MODELCONFIG_OBJECTIVESET.fields_by_name['cov'].message_type = _MODELCONFIG_COVOBJECTIVE
_MODELCONFIG_OBJECTIVESET.containing_type = _MODELCONFIG
_MODELCONFIG_TRAININGPRECISION.fields_by_name['backend_floatx'].enum_type = _MODELCONFIG_TRAININGPRECISION_BACKENDFLOATX
_MODELCONFIG_TRAININGPRECISION.containing_type = _MODELCONFIG
_MODELCONFIG_TRAININGPRECISION_BACKENDFLOATX.containing_type = _MODELCONFIG_TRAININGPRECISION
_MODELCONFIG.fields_by_name['activation'].message_type = _MODELCONFIG_ACTIVATION
_MODELCONFIG.fields_by_name['objective_set'].message_type = _MODELCONFIG_OBJECTIVESET
_MODELCONFIG.fields_by_name['training_precision'].message_type = _MODELCONFIG_TRAININGPRECISION
DESCRIPTOR.message_types_by_name['ModelConfig'] = _MODELCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ModelConfig = _reflection.GeneratedProtocolMessageType('ModelConfig', (_message.Message,), dict(
Activation = _reflection.GeneratedProtocolMessageType('Activation', (_message.Message,), dict(
ActivationParametersEntry = _reflection.GeneratedProtocolMessageType('ActivationParametersEntry', (_message.Message,), dict(
DESCRIPTOR = _MODELCONFIG_ACTIVATION_ACTIVATIONPARAMETERSENTRY,
__module__ = 'nvidia_tao_deploy.cv.detectnet_v2.proto.model_config_pb2'
# @@protoc_insertion_point(class_scope:ModelConfig.Activation.ActivationParametersEntry)
))
,
DESCRIPTOR = _MODELCONFIG_ACTIVATION,
__module__ = 'nvidia_tao_deploy.cv.detectnet_v2.proto.model_config_pb2'
# @@protoc_insertion_point(class_scope:ModelConfig.Activation)
))
,
BboxObjective = _reflection.GeneratedProtocolMessageType('BboxObjective', (_message.Message,), dict(
DESCRIPTOR = _MODELCONFIG_BBOXOBJECTIVE,
__module__ = 'nvidia_tao_deploy.cv.detectnet_v2.proto.model_config_pb2'
# @@protoc_insertion_point(class_scope:ModelConfig.BboxObjective)
))
,
CovObjective = _reflection.GeneratedProtocolMessageType('CovObjective', (_message.Message,), dict(
DESCRIPTOR = _MODELCONFIG_COVOBJECTIVE,
__module__ = 'nvidia_tao_deploy.cv.detectnet_v2.proto.model_config_pb2'
# @@protoc_insertion_point(class_scope:ModelConfig.CovObjective)
))
,
ObjectiveSet = _reflection.GeneratedProtocolMessageType('ObjectiveSet', (_message.Message,), dict(
DESCRIPTOR = _MODELCONFIG_OBJECTIVESET,
__module__ = 'nvidia_tao_deploy.cv.detectnet_v2.proto.model_config_pb2'
# @@protoc_insertion_point(class_scope:ModelConfig.ObjectiveSet)
))
,
TrainingPrecision = _reflection.GeneratedProtocolMessageType('TrainingPrecision', (_message.Message,), dict(
DESCRIPTOR = _MODELCONFIG_TRAININGPRECISION,
__module__ = 'nvidia_tao_deploy.cv.detectnet_v2.proto.model_config_pb2'
# @@protoc_insertion_point(class_scope:ModelConfig.TrainingPrecision)
))
,
DESCRIPTOR = _MODELCONFIG,
__module__ = 'nvidia_tao_deploy.cv.detectnet_v2.proto.model_config_pb2'
# @@protoc_insertion_point(class_scope:ModelConfig)
))
_sym_db.RegisterMessage(ModelConfig)
_sym_db.RegisterMessage(ModelConfig.Activation)
_sym_db.RegisterMessage(ModelConfig.Activation.ActivationParametersEntry)
_sym_db.RegisterMessage(ModelConfig.BboxObjective)
_sym_db.RegisterMessage(ModelConfig.CovObjective)
_sym_db.RegisterMessage(ModelConfig.ObjectiveSet)
_sym_db.RegisterMessage(ModelConfig.TrainingPrecision)
_MODELCONFIG_ACTIVATION_ACTIVATIONPARAMETERSENTRY._options = None
# @@protoc_insertion_point(module_scope)
| tao_deploy-main | nvidia_tao_deploy/cv/detectnet_v2/proto/model_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/detectnet_v2/proto/dataset_config.proto
# NOTE(review): generated protobuf descriptor code — only explanatory
# comments have been added; regenerate with protoc to change behavior.
import sys
# Py2/Py3 shim: proto byte strings are latin1-encoded on Python 3.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# File descriptor for dataset_config.proto; serialized_pb embeds the full
# compiled FileDescriptorProto.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='nvidia_tao_deploy/cv/detectnet_v2/proto/dataset_config.proto',
  package='',
  syntax='proto3',
  serialized_options=None,
  serialized_pb=_b('\n<nvidia_tao_deploy/cv/detectnet_v2/proto/dataset_config.proto\"Y\n\nDataSource\x12\x16\n\x0etfrecords_path\x18\x01 \x01(\t\x12\x1c\n\x14image_directory_path\x18\x02 \x01(\t\x12\x15\n\rsource_weight\x18\x03 \x01(\x02\"\x99\x04\n\rDatasetConfig\x12!\n\x0c\x64\x61ta_sources\x18\x01 \x03(\x0b\x32\x0b.DataSource\x12\x17\n\x0fimage_extension\x18\x02 \x01(\t\x12\x44\n\x14target_class_mapping\x18\x03 \x03(\x0b\x32&.DatasetConfig.TargetClassMappingEntry\x12\x19\n\x0fvalidation_fold\x18\x04 \x01(\rH\x00\x12-\n\x16validation_data_source\x18\x05 \x01(\x0b\x32\x0b.DataSourceH\x00\x12\x37\n\x0f\x64\x61taloader_mode\x18\x06 \x01(\x0e\x32\x1e.DatasetConfig.DATALOADER_MODE\x12\x33\n\rsampling_mode\x18\x07 \x01(\x0e\x32\x1c.DatasetConfig.SAMPLING_MODE\x1a\x39\n\x17TargetClassMappingEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\";\n\x0f\x44\x41TALOADER_MODE\x12\x0f\n\x0bMULTISOURCE\x10\x00\x12\n\n\x06LEGACY\x10\x01\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x02\"@\n\rSAMPLING_MODE\x12\x10\n\x0cUSER_DEFINED\x10\x00\x12\x10\n\x0cPROPORTIONAL\x10\x01\x12\x0b\n\x07UNIFORM\x10\x02\x42\x14\n\x12\x64\x61taset_split_typeb\x06proto3')
)
# Descriptor for the nested enum DatasetConfig.DATALOADER_MODE.
_DATASETCONFIG_DATALOADER_MODE = _descriptor.EnumDescriptor(
  name='DATALOADER_MODE',
  full_name='DatasetConfig.DATALOADER_MODE',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='MULTISOURCE', index=0, number=0,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='LEGACY', index=1, number=1,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='DEFAULT', index=2, number=2,
      serialized_options=None,
      type=None),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=546,
  serialized_end=605,
)
_sym_db.RegisterEnumDescriptor(_DATASETCONFIG_DATALOADER_MODE)
# Descriptor for the nested enum DatasetConfig.SAMPLING_MODE.
_DATASETCONFIG_SAMPLING_MODE = _descriptor.EnumDescriptor(
  name='SAMPLING_MODE',
  full_name='DatasetConfig.SAMPLING_MODE',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='USER_DEFINED', index=0, number=0,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='PROPORTIONAL', index=1, number=1,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='UNIFORM', index=2, number=2,
      serialized_options=None,
      type=None),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=607,
  serialized_end=671,
)
_sym_db.RegisterEnumDescriptor(_DATASETCONFIG_SAMPLING_MODE)
# Descriptor for the top-level DataSource message.
_DATASOURCE = _descriptor.Descriptor(
  name='DataSource',
  full_name='DataSource',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='tfrecords_path', full_name='DataSource.tfrecords_path', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='image_directory_path', full_name='DataSource.image_directory_path', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='source_weight', full_name='DataSource.source_weight', index=2,
      number=3, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=64,
  serialized_end=153,
)
# Descriptor for the map-entry message backing DatasetConfig.target_class_mapping.
_DATASETCONFIG_TARGETCLASSMAPPINGENTRY = _descriptor.Descriptor(
  name='TargetClassMappingEntry',
  full_name='DatasetConfig.TargetClassMappingEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='DatasetConfig.TargetClassMappingEntry.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='value', full_name='DatasetConfig.TargetClassMappingEntry.value', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=_b('8\001'),
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=487,
  serialized_end=544,
)
# Descriptor for the top-level DatasetConfig message.
_DATASETCONFIG = _descriptor.Descriptor(
  name='DatasetConfig',
  full_name='DatasetConfig',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='data_sources', full_name='DatasetConfig.data_sources', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='image_extension', full_name='DatasetConfig.image_extension', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='target_class_mapping', full_name='DatasetConfig.target_class_mapping', index=2,
      number=3, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='validation_fold', full_name='DatasetConfig.validation_fold', index=3,
      number=4, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='validation_data_source', full_name='DatasetConfig.validation_data_source', index=4,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='dataloader_mode', full_name='DatasetConfig.dataloader_mode', index=5,
      number=6, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='sampling_mode', full_name='DatasetConfig.sampling_mode', index=6,
      number=7, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[_DATASETCONFIG_TARGETCLASSMAPPINGENTRY, ],
  enum_types=[
    _DATASETCONFIG_DATALOADER_MODE,
    _DATASETCONFIG_SAMPLING_MODE,
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
    _descriptor.OneofDescriptor(
      name='dataset_split_type', full_name='DatasetConfig.dataset_split_type',
      index=0, containing_type=None, fields=[]),
  ],
  serialized_start=156,
  serialized_end=693,
)
# Wire the cross-references between descriptors, including the
# dataset_split_type oneof (validation_fold vs validation_data_source).
_DATASETCONFIG_TARGETCLASSMAPPINGENTRY.containing_type = _DATASETCONFIG
_DATASETCONFIG.fields_by_name['data_sources'].message_type = _DATASOURCE
_DATASETCONFIG.fields_by_name['target_class_mapping'].message_type = _DATASETCONFIG_TARGETCLASSMAPPINGENTRY
_DATASETCONFIG.fields_by_name['validation_data_source'].message_type = _DATASOURCE
_DATASETCONFIG.fields_by_name['dataloader_mode'].enum_type = _DATASETCONFIG_DATALOADER_MODE
_DATASETCONFIG.fields_by_name['sampling_mode'].enum_type = _DATASETCONFIG_SAMPLING_MODE
_DATASETCONFIG_DATALOADER_MODE.containing_type = _DATASETCONFIG
_DATASETCONFIG_SAMPLING_MODE.containing_type = _DATASETCONFIG
_DATASETCONFIG.oneofs_by_name['dataset_split_type'].fields.append(
  _DATASETCONFIG.fields_by_name['validation_fold'])
_DATASETCONFIG.fields_by_name['validation_fold'].containing_oneof = _DATASETCONFIG.oneofs_by_name['dataset_split_type']
_DATASETCONFIG.oneofs_by_name['dataset_split_type'].fields.append(
  _DATASETCONFIG.fields_by_name['validation_data_source'])
_DATASETCONFIG.fields_by_name['validation_data_source'].containing_oneof = _DATASETCONFIG.oneofs_by_name['dataset_split_type']
DESCRIPTOR.message_types_by_name['DataSource'] = _DATASOURCE
DESCRIPTOR.message_types_by_name['DatasetConfig'] = _DATASETCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Build the concrete Python message classes from the descriptors.
DataSource = _reflection.GeneratedProtocolMessageType('DataSource', (_message.Message,), dict(
  DESCRIPTOR = _DATASOURCE,
  __module__ = 'nvidia_tao_deploy.cv.detectnet_v2.proto.dataset_config_pb2'
  # @@protoc_insertion_point(class_scope:DataSource)
  ))
_sym_db.RegisterMessage(DataSource)
DatasetConfig = _reflection.GeneratedProtocolMessageType('DatasetConfig', (_message.Message,), dict(
  TargetClassMappingEntry = _reflection.GeneratedProtocolMessageType('TargetClassMappingEntry', (_message.Message,), dict(
    DESCRIPTOR = _DATASETCONFIG_TARGETCLASSMAPPINGENTRY,
    __module__ = 'nvidia_tao_deploy.cv.detectnet_v2.proto.dataset_config_pb2'
    # @@protoc_insertion_point(class_scope:DatasetConfig.TargetClassMappingEntry)
    ))
  ,
  DESCRIPTOR = _DATASETCONFIG,
  __module__ = 'nvidia_tao_deploy.cv.detectnet_v2.proto.dataset_config_pb2'
  # @@protoc_insertion_point(class_scope:DatasetConfig)
  ))
_sym_db.RegisterMessage(DatasetConfig)
_sym_db.RegisterMessage(DatasetConfig.TargetClassMappingEntry)
# Generated boilerplate: clear cached options so the map-entry option for
# TargetClassMappingEntry is read from its serialized form.
_DATASETCONFIG_TARGETCLASSMAPPINGENTRY._options = None
# @@protoc_insertion_point(module_scope)
| tao_deploy-main | nvidia_tao_deploy/cv/detectnet_v2/proto/dataset_config_pb2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DetectNetv2 convert etlt/onnx model to TRT engine."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import tempfile
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.detectnet_v2.engine_builder import DetectNetEngineBuilder
from nvidia_tao_deploy.cv.detectnet_v2.proto.utils import load_proto
from nvidia_tao_deploy.utils.decoding import decode_model
# Configure root logging once at import time so all script output is uniform.
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
                    level="INFO")
logger = logging.getLogger(__name__)
# Default TensorRT optimization-profile batch sizes; each is overridable via
# the --max_batch_size / --min_batch_size / --opt_batch_size CLI flags.
DEFAULT_MAX_BATCH_SIZE = 1
DEFAULT_MIN_BATCH_SIZE = 1
DEFAULT_OPT_BATCH_SIZE = 1
@monitor_status(name='detectnet_v2', mode='gen_trt_engine')
def main(args):
    """Convert a DetectNetv2 .etlt/.onnx model into a TensorRT engine.

    Args:
        args (argparse.Namespace): parsed command line arguments
            (see ``build_command_line_parser``).
    """
    # Decrypt/decode the model file; returns a temporary model path and its
    # serialization format (ONNX or UFF).
    tmp_onnx_file, file_format = decode_model(args.model_path, args.key)

    # Always resolve an output engine path. When --engine_file is omitted,
    # build into a throwaway temp file (useful for int8 calibration runs that
    # only need the calibration artifacts). BUGFIX: the previous code guarded
    # this with `if args.engine_file is not None or args.data_type == 'int8'`,
    # leaving ``output_engine_path`` unbound — and crashing at create_engine —
    # when neither condition held.
    if args.engine_file is None:
        engine_handle, temp_engine_path = tempfile.mkstemp()
        os.close(engine_handle)
        output_engine_path = temp_engine_path
    else:
        output_engine_path = args.engine_file

    experiment_spec = load_proto(args.experiment_spec)

    # Calibration inputs: an explicit --cal_image_dir wins; otherwise fall
    # back to the training data sources listed in the experiment spec.
    if args.cal_image_dir:
        calib_input = args.cal_image_dir
    else:
        calib_input = []
        dataset_proto = experiment_spec.dataset_config
        for data_source_proto in dataset_proto.data_sources:
            calib_input.append(str(data_source_proto.image_directory_path))

    # DNv2 supports both UFF and ONNX
    builder = DetectNetEngineBuilder(verbose=args.verbose,
                                     is_qat=experiment_spec.training_config.enable_qat,
                                     workspace=args.max_workspace_size,
                                     min_batch_size=args.min_batch_size,
                                     opt_batch_size=args.opt_batch_size,
                                     max_batch_size=args.max_batch_size,
                                     strict_type_constraints=args.strict_type_constraints,
                                     force_ptq=args.force_ptq)
    builder.create_network(tmp_onnx_file, file_format)
    builder.create_engine(
        output_engine_path,
        args.data_type,
        calib_data_file=args.cal_data_file,
        calib_input=calib_input,
        calib_cache=args.cal_cache_file,
        calib_num_images=args.batch_size * args.batches,
        calib_batch_size=args.batch_size,
        calib_json_file=args.cal_json_file)
    # Use the module logger (not the root logger) for consistent formatting.
    logger.info("Export finished successfully.")
def build_command_line_parser(parser=None):
    """Build the command line parser using argparse.

    Args:
        parser (argparse.ArgumentParser, optional): Provided from the wrapper
            script to build a chained parser mechanism. A new parser is
            created when None.

    Returns:
        argparse.ArgumentParser: parser with all gen_trt_engine options added.
    """
    if parser is None:
        parser = argparse.ArgumentParser(prog='gen_trt_engine', description='Generate TRT engine of DetectNetv2 model.')
    parser.add_argument(
        '-m',
        '--model_path',
        type=str,
        required=True,
        help='Path to a DetectNetv2 .etlt or .onnx model file.'
    )
    parser.add_argument(
        '-k',
        '--key',
        type=str,
        required=False,
        help='Key to save or load a .etlt model.'
    )
    # NOTE: dead `default="specs/experiment_spec.txt"` removed — argparse
    # ignores defaults on required arguments, so it was only misleading.
    parser.add_argument(
        "-e",
        "--experiment_spec",
        type=str,
        required=True,
        help="Experiment spec file for DetectNetv2.")
    parser.add_argument(
        "--data_type",
        type=str,
        default="fp32",
        help="Data type for the TensorRT export.",
        choices=["fp32", "fp16", "int8"])
    parser.add_argument(
        "--cal_image_dir",
        default="",
        type=str,
        help="Directory of images to run int8 calibration.")
    parser.add_argument(
        "--cal_data_file",
        default="",
        type=str,
        help="Tensorfile to run calibration for int8 optimization.")
    parser.add_argument(
        '--cal_cache_file',
        default=None,
        type=str,
        help='Calibration cache file to write to.')
    parser.add_argument(
        '--cal_json_file',
        default=None,
        type=str,
        help='Dictionary containing tensor scale for QAT models.')
    parser.add_argument(
        "--engine_file",
        type=str,
        default=None,
        help="Path to the exported TRT engine.")
    parser.add_argument(
        "--max_batch_size",
        type=int,
        default=DEFAULT_MAX_BATCH_SIZE,
        help="Max batch size for TensorRT engine builder.")
    parser.add_argument(
        "--min_batch_size",
        type=int,
        default=DEFAULT_MIN_BATCH_SIZE,
        help="Min batch size for TensorRT engine builder.")
    parser.add_argument(
        "--opt_batch_size",
        type=int,
        default=DEFAULT_OPT_BATCH_SIZE,
        help="Opt batch size for TensorRT engine builder.")
    parser.add_argument(
        "--batch_size",
        type=int,
        default=1,
        help="Number of images per batch.")
    parser.add_argument(
        "--batches",
        type=int,
        default=10,
        help="Number of batches to calibrate over.")
    parser.add_argument(
        "--max_workspace_size",
        type=int,
        default=2,
        help="Max memory workspace size to allow in Gb for TensorRT engine builder (default: 2).")
    parser.add_argument(
        "-s",
        "--strict_type_constraints",
        action="store_true",
        default=False,
        help="A Boolean flag indicating whether to apply the \
            TensorRT strict type constraints when building the TensorRT engine.")
    parser.add_argument(
        "--force_ptq",
        action="store_true",
        default=False,
        help="Flag to force post training quantization for QAT models.")
    parser.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        default=False,
        help="Verbosity of the logger.")
    # NOTE: dead `default=None` removed — the argument is required.
    parser.add_argument(
        '-r',
        '--results_dir',
        type=str,
        required=True,
        help='Output directory where the log is saved.'
    )
    return parser
def parse_command_line_arguments(args=None):
    """Parse command line arguments.

    Args:
        args (list, optional): list of argument strings to parse; defaults to
            ``sys.argv[1:]`` when None.

    Returns:
        argparse.Namespace: parsed arguments.
    """
    # BUGFIX: the original passed ``args`` (a list of argument strings) as the
    # ``parser`` argument of build_command_line_parser, which crashed whenever
    # a non-None ``args`` was supplied.
    parser = build_command_line_parser()
    return parser.parse_args(args)
# Script entry point: parse CLI flags and run the engine generation.
if __name__ == '__main__':
    args = parse_command_line_arguments()
    main(args)
| tao_deploy-main | nvidia_tao_deploy/cv/detectnet_v2/scripts/gen_trt_engine.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy DetectNetv2 scripts module."""
| tao_deploy-main | nvidia_tao_deploy/cv/detectnet_v2/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Standalone TensorRT inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
from PIL import Image
import numpy as np
from tqdm.auto import tqdm
import logging
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.detectnet_v2.proto.utils import load_proto
# Configure root logging before importing the TAO modules below — presumably
# so any import-time log output is formatted; the late imports are deliberate
# (hence the noqa: E402 markers).
logging.basicConfig(format='%(asctime)s [%(levelname)s] %(name)s: %(message)s',
                    level='INFO')
from nvidia_tao_deploy.cv.detectnet_v2.dataloader import DetectNetKITTILoader  # noqa: E402
from nvidia_tao_deploy.cv.detectnet_v2.inferencer import DetectNetInferencer  # noqa: E402
from nvidia_tao_deploy.cv.detectnet_v2.postprocessor import BboxHandler  # noqa: E402
@monitor_status(name='detectnet_v2', mode='inference')
def main(args):
    """Run DetectNetv2 TensorRT inference over a directory of images.

    Writes annotated images to ``<results_dir>/images_annotated`` and
    KITTI-format label files to ``<results_dir>/labels``.

    Args:
        args (argparse.Namespace): parsed command line arguments
            (see ``build_command_line_parser``).
    """
    inferencer_spec = load_proto(args.experiment_spec, "inference")

    # Load target classes from label file
    target_classes = inferencer_spec.inferencer_config.target_classes

    # Load mapping_dict from the spec file
    # (identity mapping: each target class maps to itself).
    mapping_dict = {c: c for c in target_classes}

    # CLI batch size overrides the spec's inferencer_config.batch_size.
    batch_size = args.batch_size if args.batch_size else inferencer_spec.inferencer_config.batch_size
    trt_infer = DetectNetInferencer(args.model_path,
                                    batch_size=batch_size,
                                    target_classes=target_classes)
    # Legacy UFF engines have a fixed batch size; force it over the requested one.
    if batch_size != trt_infer.max_batch_size and trt_infer.etlt_type == "uff":
        logging.warning("Using deprecated UFF format. Overriding provided batch size "
                        "%d to engine's batch size %d", batch_size, trt_infer.max_batch_size)
        batch_size = trt_infer.max_batch_size

    # Engine input shape in CHW order.
    c, h, w = trt_infer._input_shape
    dl = DetectNetKITTILoader(
        shape=(c, h, w),
        image_dirs=[args.image_dir],
        label_dirs=[None],
        mapping_dict=mapping_dict,
        exclude_difficult=True,
        batch_size=batch_size,
        is_inference=True,
        image_mean=None,
        dtype=trt_infer.inputs[0].host.dtype)

    bboxer = BboxHandler(batch_size=batch_size,
                         frame_height=h,
                         frame_width=w,
                         target_classes=target_classes,
                         postproc_classes=target_classes,
                         classwise_cluster_params=inferencer_spec.bbox_handler_config.classwise_bbox_handler_config,
                         )

    # Override class mapping with the class order specified by target_classes
    # (class ids are 1-based; 0 is presumably reserved for background — verify).
    dl.classes = {c: i + 1 for i, c in enumerate(target_classes)}
    dl.class_mapping = {key.lower(): dl.classes[str(val.lower())]
                        for key, val in mapping_dict.items()}
    inv_classes = {v: k for k, v in dl.classes.items()}

    # Default results dir: alongside the engine file.
    if args.results_dir is None:
        results_dir = os.path.dirname(args.model_path)
    else:
        results_dir = args.results_dir
    os.makedirs(results_dir, exist_ok=True)
    output_annotate_root = os.path.join(results_dir, "images_annotated")
    output_label_root = os.path.join(results_dir, "labels")
    os.makedirs(output_annotate_root, exist_ok=True)
    os.makedirs(output_label_root, exist_ok=True)

    # Get classwise edge color; defaults to green when no color is configured.
    box_color = {}
    for k, v in inferencer_spec.bbox_handler_config.classwise_bbox_handler_config.items():
        box_color[k] = (0, 255, 0)
        if v.bbox_color:
            box_color[k] = (v.bbox_color.R, v.bbox_color.G, v.bbox_color.B)

    # Inference loop: predict -> cluster raw detections -> rescale to original
    # image size -> draw boxes and dump KITTI labels.
    for i, (imgs, _) in tqdm(enumerate(dl), total=len(dl), desc="Producing predictions"):
        y_pred = trt_infer.infer(imgs)
        processed_inference = bboxer.bbox_preprocessing(y_pred)
        classwise_detections = bboxer.cluster_detections(processed_inference)
        y_pred_valid = bboxer.postprocess(classwise_detections, batch_size, dl.image_size[i], (w, h), dl.classes)

        image_paths = dl.image_paths[np.arange(batch_size) + batch_size * i]
        for img_path, pred in zip(image_paths, y_pred_valid):
            # Load image
            img = Image.open(img_path)

            # No need to rescale here as rescaling was done in bboxer.postprocess
            bbox_img, label_strings = trt_infer.draw_bbox(img, pred, inv_classes, bboxer.state['confidence_th'], box_color)
            img_filename = os.path.basename(img_path)
            bbox_img.save(os.path.join(output_annotate_root, img_filename))

            # Store labels
            filename, _ = os.path.splitext(img_filename)
            label_file_name = os.path.join(output_label_root, filename + ".txt")
            with open(label_file_name, "w", encoding="utf-8") as f:
                for l_s in label_strings:
                    f.write(l_s)

    logging.info("Finished inference.")
def build_command_line_parser(parser=None):
    """Build the command line parser using argparse.

    Args:
        parser (subparser): Provided from the wrapper script to build a chained
            parser mechanism.

    Returns:
        parser
    """
    if parser is None:
        parser = argparse.ArgumentParser(prog='infer', description='Inference with a DetectNetv2 TRT model.')
    parser.add_argument(
        '-i',
        '--image_dir',
        type=str,
        required=False,
        default=None,
        help='Input directory of images')
    parser.add_argument(
        '-e',
        '--experiment_spec',
        type=str,
        required=True,
        help='Path to the experiment spec file.'
    )
    parser.add_argument(
        '-m',
        '--model_path',
        type=str,
        required=True,
        # Fixed copy-pasted help text: this script consumes a DetectNetv2 engine.
        help='Path to the DetectNetv2 TensorRT engine.'
    )
    parser.add_argument(
        '-b',
        '--batch_size',
        type=int,
        required=False,
        default=None,
        help='Batch size.')
    parser.add_argument(
        '-r',
        '--results_dir',
        type=str,
        # main() falls back to the model directory when this flag is omitted,
        # so it must be optional; required=True made that fallback unreachable
        # (and made the default=None meaningless).
        required=False,
        default=None,
        help='Output directory where the log is saved.'
    )
    return parser
def parse_command_line_arguments(args=None):
    """Simple function to parse command line arguments.

    Args:
        args (list): list of CLI argument strings; defaults to ``sys.argv[1:]``.

    Returns:
        argparse.Namespace: the parsed arguments.
    """
    # Bug fix: the args list was previously forwarded as the `parser` argument
    # of build_command_line_parser(), which raises AttributeError whenever an
    # explicit argument list is supplied (a list has no .add_argument()).
    parser = build_command_line_parser()
    return parser.parse_args(args)
if __name__ == '__main__':
    # Parse CLI arguments and run the DetectNetv2 inference entrypoint.
    main(parse_command_line_arguments())
| tao_deploy-main | nvidia_tao_deploy/cv/detectnet_v2/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Standalone TensorRT evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import json
import numpy as np
from tqdm.auto import tqdm
import logging
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.detectnet_v2.proto.utils import load_proto
from nvidia_tao_deploy.cv.detectnet_v2.proto.postprocessing_config import build_postprocessing_config
from nvidia_tao_deploy.cv.detectnet_v2.dataloader import DetectNetKITTILoader
from nvidia_tao_deploy.cv.detectnet_v2.inferencer import DetectNetInferencer
from nvidia_tao_deploy.cv.detectnet_v2.postprocessor import BboxHandler
from nvidia_tao_deploy.metrics.kitti_metric import KITTIMetric
logger = logging.getLogger(__name__)
logging.getLogger('PIL').setLevel(logging.WARNING)
logging.basicConfig(format='%(asctime)s [%(levelname)s] %(name)s: %(message)s',
level='INFO')
@monitor_status(name='detectnet_v2', mode='evaluation')
def main(args):
    """DetectNetv2 TRT evaluation.

    Runs TensorRT inference over a KITTI-format dataset, computes per-class
    AP and mAP via ``KITTIMetric``, and writes the metrics to ``results.json``
    in ``args.results_dir`` (falling back to the engine's directory when the
    flag is not provided).

    Args:
        args (argparse.Namespace): parsed command line arguments
            (experiment_spec, model_path, image_dir, label_dir,
            batch_size, results_dir).
    """
    experiment_spec = load_proto(args.experiment_spec)
    pproc_config = build_postprocessing_config(experiment_spec.postprocessing_config)
    # Load mapping_dict from the spec file
    mapping_dict = dict(experiment_spec.dataset_config.target_class_mapping)
    # Load target classes from the cost function config of the spec file
    target_classes = [target_class.name for target_class in experiment_spec.cost_function_config.target_classes]
    batch_size = args.batch_size if args.batch_size else experiment_spec.training_config.batch_size_per_gpu
    trt_infer = DetectNetInferencer(args.model_path,
                                    batch_size=batch_size,
                                    target_classes=target_classes)
    # UFF engines have a fixed batch size baked in; fall back to it when needed.
    if batch_size != trt_infer.max_batch_size and trt_infer.etlt_type == "uff":
        logging.warning("Using deprecated UFF format. Overriding provided batch size "
                        "%d to engine's batch size %d", batch_size, trt_infer.max_batch_size)
        batch_size = trt_infer.max_batch_size
    c, h, w = trt_infer._input_shape
    dl = DetectNetKITTILoader(
        shape=(c, h, w),
        image_dirs=[args.image_dir],
        label_dirs=[args.label_dir],
        mapping_dict=mapping_dict,
        exclude_difficult=True,
        batch_size=batch_size,
        image_mean=None,
        dtype=trt_infer.inputs[0].host.dtype)
    bboxer = BboxHandler(batch_size=batch_size,
                         frame_height=h,
                         frame_width=w,
                         target_classes=target_classes,
                         postproc_classes=target_classes,
                         classwise_cluster_params=pproc_config,
                         )
    # Override class mapping with the class order specified by target_classes
    dl.classes = {c: i + 1 for i, c in enumerate(target_classes)}
    dl.class_mapping = {key.lower(): dl.classes[str(val.lower())]
                        for key, val in mapping_dict.items()}
    # +1 accounts for the implicit background class at index 0.
    eval_metric = KITTIMetric(n_classes=len(dl.classes) + 1)
    gt_labels = []
    pred_labels = []
    for i, (imgs, labels) in tqdm(enumerate(dl), total=len(dl), desc="Producing predictions"):
        gt_labels.extend(labels)
        y_pred = trt_infer.infer(imgs)
        processed_inference = bboxer.bbox_preprocessing(y_pred)
        classwise_detections = bboxer.cluster_detections(processed_inference)
        y_pred_valid = bboxer.postprocess(classwise_detections, batch_size, dl.image_size[i], (w, h), dl.classes)
        pred_labels.extend(y_pred_valid)
    m_ap, ap = eval_metric(gt_labels, pred_labels, verbose=True)
    # Recompute mAP excluding the background class (index 0).
    m_ap = np.mean(ap[1:])
    logging.info("*******************************")
    class_mapping = {v: k for k, v in dl.classes.items()}
    eval_results = {}
    for i in range(len(dl.classes)):
        eval_results['AP_' + class_mapping[i + 1]] = np.float64(ap[i + 1])
        logging.info("{:<14}{:<6}{}".format(class_mapping[i + 1], 'AP', round(ap[i + 1], 5)))  # noqa pylint: disable=C0209
    logging.info("{:<14}{:<6}{}".format('', 'mAP', round(m_ap, 3)))  # noqa pylint: disable=C0209
    logging.info("*******************************")
    # Store evaluation results into JSON
    if args.results_dir is None:
        results_dir = os.path.dirname(args.model_path)
    else:
        results_dir = args.results_dir
    # Bug fix: ensure the output directory exists before writing results.json
    # (the sibling inference script does this; evaluate previously assumed it existed).
    os.makedirs(results_dir, exist_ok=True)
    with open(os.path.join(results_dir, "results.json"), "w", encoding="utf-8") as f:
        json.dump(eval_results, f)
    logging.info("Finished evaluation.")
def build_command_line_parser(parser=None):
    """Build the command line parser using argparse.

    Args:
        parser (subparser): Provided from the wrapper script to build a chained
            parser mechanism.

    Returns:
        parser
    """
    if parser is None:
        # Fixed copy-pasted description: this is the DetectNetv2 evaluation entrypoint.
        parser = argparse.ArgumentParser(prog='eval', description='Evaluate with a DetectNetv2 TRT model.')
    parser.add_argument(
        '-i',
        '--image_dir',
        type=str,
        required=False,
        default=None,
        help='Input directory of images')
    parser.add_argument(
        '-e',
        '--experiment_spec',
        type=str,
        required=True,
        help='Path to the experiment spec file.'
    )
    parser.add_argument(
        '-m',
        '--model_path',
        type=str,
        required=True,
        # Fixed copy-pasted help text: this script consumes a DetectNetv2 engine.
        help='Path to the DetectNetv2 TensorRT engine.'
    )
    parser.add_argument(
        '-l',
        '--label_dir',
        type=str,
        required=False,
        help='Label directory.')
    parser.add_argument(
        '-b',
        '--batch_size',
        type=int,
        required=False,
        default=1,
        help='Batch size.')
    parser.add_argument(
        '-r',
        '--results_dir',
        type=str,
        # main() falls back to the model directory when this flag is omitted,
        # so it must be optional; required=True made that fallback unreachable
        # (and made the default=None meaningless).
        required=False,
        default=None,
        help='Output directory where the log is saved.'
    )
    return parser
def parse_command_line_arguments(args=None):
    """Simple function to parse command line arguments.

    Args:
        args (list): list of CLI argument strings; defaults to ``sys.argv[1:]``.

    Returns:
        argparse.Namespace: the parsed arguments.
    """
    # Bug fix: the args list was previously forwarded as the `parser` argument
    # of build_command_line_parser(), which raises AttributeError whenever an
    # explicit argument list is supplied (a list has no .add_argument()).
    parser = build_command_line_parser()
    return parser.parse_args(args)
if __name__ == '__main__':
    # Parse CLI arguments and run the DetectNetv2 evaluation entrypoint.
    main(parse_command_line_arguments())
| tao_deploy-main | nvidia_tao_deploy/cv/detectnet_v2/scripts/evaluate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entrypoint module for DetectNetv2."""
| tao_deploy-main | nvidia_tao_deploy/cv/detectnet_v2/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy command line wrapper to invoke CLI scripts."""
import sys
from nvidia_tao_deploy.cv.common.entrypoint.entrypoint_proto import launch_job
import nvidia_tao_deploy.cv.detectnet_v2.scripts
def main():
    """Launch a DetectNetv2 TAO Deploy job from the command line arguments."""
    cli_args = sys.argv[1:]
    launch_job(nvidia_tao_deploy.cv.detectnet_v2.scripts, "detectnet_v2", cli_args)
# Standalone execution entrypoint for the detectnet_v2 launcher.
if __name__ == "__main__":
    main()
| tao_deploy-main | nvidia_tao_deploy/cv/detectnet_v2/entrypoint/detectnet_v2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility class for performing TensorRT image inference."""
import numpy as np
import tensorrt as trt
from nvidia_tao_deploy.inferencer.trt_inferencer import TRTInferencer
from nvidia_tao_deploy.inferencer.utils import allocate_buffers, do_inference
def trt_output_process_fn(y_encoded):
    """Return an independent copy of the classification output tensor.

    Args:
        y_encoded (list): raw TensorRT output buffers; index 0 holds the
            class scores.

    Returns:
        np.ndarray: a copy of the first output, detached from the reusable
        host buffer so later inferences cannot overwrite it.
    """
    return np.copy(y_encoded[0])
class ClassificationInferencer(TRTInferencer):
    """Manages TensorRT objects for model inference.

    Wraps a deserialized TensorRT engine, allocates host/device buffers once,
    and exposes ``infer`` for batched classification inference.
    """

    def __init__(self, engine_path, input_shape=None, batch_size=None, data_format="channel_first"):
        """Initializes TensorRT objects needed for model inference.

        Args:
            engine_path (str): path where TensorRT engine should be stored
            input_shape (tuple): (batch, channel, height, width) for dynamic shape engine
            batch_size (int): batch size for dynamic shape engine
            data_format (str): either channel_first or channel_last
        """
        # Load TRT engine
        super().__init__(engine_path)
        self.max_batch_size = self.engine.max_batch_size
        self.execute_v2 = False
        # Execution context is needed for inference
        self.context = None
        # Scan the bindings for the input tensor; the last 3 dims are the
        # image shape. A 4-D binding implies an explicit-batch (ONNX) engine,
        # otherwise the engine came from the deprecated UFF path.
        self._input_shape = []
        for binding in range(self.engine.num_bindings):
            if self.engine.binding_is_input(binding):
                binding_shape = self.engine.get_binding_shape(binding)
                self._input_shape = binding_shape[-3:]
                if len(binding_shape) == 4:
                    self.etlt_type = "onnx"
                else:
                    self.etlt_type = "uff"
        assert len(self._input_shape) == 3, "Engine doesn't have valid input dimensions"
        if data_format == "channel_first":
            self.height = self._input_shape[1]
            self.width = self._input_shape[2]
        else:
            self.height = self._input_shape[0]
            self.width = self._input_shape[1]
        # set binding_shape for dynamic input
        # do not override if the original model was uff (fixed batch size)
        if (input_shape is not None or batch_size is not None) and (self.etlt_type != "uff"):
            self.context = self.engine.create_execution_context()
            if input_shape is not None:
                self.context.set_binding_shape(0, input_shape)
                self.max_batch_size = input_shape[0]
            else:
                self.context.set_binding_shape(0, [batch_size] + list(self._input_shape))
                self.max_batch_size = batch_size
            # Explicit-batch engines must be run through execute_v2.
            self.execute_v2 = True
        # This allocates memory for network inputs/outputs on both CPU and GPU
        self.inputs, self.outputs, self.bindings, self.stream = allocate_buffers(self.engine,
                                                                                 self.context)
        if self.context is None:
            self.context = self.engine.create_execution_context()
        # Pre-allocate one flat staging array reused by every infer() call.
        input_volume = trt.volume(self._input_shape)
        self.numpy_array = np.zeros((self.max_batch_size, input_volume))

    def infer(self, imgs):
        """Infers model on batch of same sized images resized to fit the model.

        Args:
            imgs (np.ndarray): batch of preprocessed images whose first
                dimension is the batch; must not exceed ``self.max_batch_size``.

        Returns:
            np.ndarray: copy of the class-score output trimmed to the actual
            batch size.
        """
        # Verify if the supplied batch size is not too big
        max_batch_size = self.max_batch_size
        actual_batch_size = len(imgs)
        if actual_batch_size > max_batch_size:
            raise ValueError(f"image_paths list bigger ({actual_batch_size}) than \
                engine max batch size ({max_batch_size})")
        self.numpy_array[:actual_batch_size] = imgs.reshape(actual_batch_size, -1)
        # ...copy them into appropriate place into memory...
        # (self.inputs was returned earlier by allocate_buffers())
        np.copyto(self.inputs[0].host, self.numpy_array.ravel())
        # ...fetch model outputs...
        results = do_inference(
            self.context, bindings=self.bindings, inputs=self.inputs,
            outputs=self.outputs, stream=self.stream,
            batch_size=max_batch_size,
            execute_v2=self.execute_v2)
        # ...and return results up to the actual batch size.
        y_pred = [i.reshape(max_batch_size, -1)[:actual_batch_size] for i in results]
        # Process TRT outputs to proper format
        return trt_output_process_fn(y_pred)

    def __del__(self):
        """Clear things up on object deletion."""
        # NOTE(review): if __init__ raised before these attributes were bound,
        # this __del__ raises AttributeError — consider getattr(...) guards.
        # Clear session and buffer
        if self.trt_runtime:
            del self.trt_runtime
        if self.context:
            del self.context
        if self.engine:
            del self.engine
        if self.stream:
            del self.stream
        # Loop through inputs and free inputs.
        for inp in self.inputs:
            inp.device.free()
        # Loop through outputs and free them.
        for out in self.outputs:
            out.device.free()
| tao_deploy-main | nvidia_tao_deploy/cv/classification_tf1/inferencer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classification TensorRT engine builder."""
import logging
from pathlib import Path
import os
import random
from six.moves import xrange
import sys
import traceback
from tqdm import tqdm
try:
from uff.model.uff_pb2 import MetaGraph
except ImportError:
print("Loading uff directly from the package source code")
# @scha: To disable tensorflow import issue
import importlib
import types
import pkgutil
package = pkgutil.get_loader("uff")
# Returns __init__.py path
src_code = package.get_filename().replace('__init__.py', 'model/uff_pb2.py')
loader = importlib.machinery.SourceFileLoader('helper', src_code)
helper = types.ModuleType(loader.name)
loader.exec_module(helper)
MetaGraph = helper.MetaGraph
import numpy as np
import onnx
import tensorrt as trt
from nvidia_tao_deploy.cv.common.constants import VALID_IMAGE_EXTENSIONS
from nvidia_tao_deploy.engine.builder import EngineBuilder
from nvidia_tao_deploy.engine.tensorfile import TensorFile
from nvidia_tao_deploy.engine.tensorfile_calibrator import TensorfileCalibrator
from nvidia_tao_deploy.engine.utils import generate_random_tensorfile, prepare_chunk
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
class ClassificationEngineBuilder(EngineBuilder):
    """Parses an UFF/ONNX graph and builds a TensorRT engine from it."""

    def __init__(
        self,
        image_mean=None,
        data_format="channels_first",
        preprocess_mode="caffe",
        **kwargs
    ):
        """Init.

        Args:
            image_mean (list): Image mean per channel.
            data_format (str): data_format, "channels_first" or "channels_last".
            preprocess_mode (str): preprocessing mode to use on input image
                ("caffe" or "torch"; see set_data_preprocessing_parameters).
        """
        super().__init__(**kwargs)
        self.image_mean = image_mean
        self._data_format = data_format
        self.preprocess_mode = preprocess_mode

    def set_input_output_node_names(self):
        """Set input output node names (fixed names used by the UFF parser)."""
        self._output_node_names = ["predictions/Softmax"]
        self._input_node_names = ["input_1"]

    def get_uff_input_dims(self, model_path):
        """Get input dimension of UFF model.

        Reads the serialized UFF metagraph and returns the shape of the first
        "Input" node, with the leading (batch) dimension dropped.
        """
        metagraph = MetaGraph()
        with open(model_path, "rb") as f:
            metagraph.ParseFromString(f.read())
        for node in metagraph.graphs[0].nodes:
            if node.operation == "Input":
                return np.array(node.fields['shape'].i_list.val)[1:]
        raise ValueError("Input dimension is not found in the UFF metagraph.")

    def get_onnx_input_dims(self, model_path):
        """Get input dimension of ONNX model.

        NOTE(review): the return uses the loop variable after the loop, so it
        yields the dims of the *last* graph input — assumes a single-input
        model; confirm before reusing on multi-input graphs.
        """
        onnx_model = onnx.load(model_path)
        onnx_inputs = onnx_model.graph.input
        logger.info('List inputs:')
        for i, inputs in enumerate(onnx_inputs):
            logger.info('Input %s -> %s.', i, inputs.name)
            logger.info('%s.', [i.dim_value for i in inputs.type.tensor_type.shape.dim][1:])
            logger.info('%s.', [i.dim_value for i in inputs.type.tensor_type.shape.dim][0])
        return [i.dim_value for i in inputs.type.tensor_type.shape.dim][:]

    def create_network(self, model_path, file_format="onnx"):
        """Parse the UFF/ONNX graph and create the corresponding TensorRT network definition.

        Args:
            model_path: The path to the UFF/ONNX graph to load.
            file_format: The file format of the decrypted etlt file (default: onnx).
        """
        if file_format == "onnx":
            logger.info("Parsing ONNX model")
            # First dim of the ONNX input is the batch size; keep it separate.
            self._input_dims = self.get_onnx_input_dims(model_path)
            self.batch_size = self._input_dims[0]
            self._input_dims = self._input_dims[1:]
            network_flags = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
            network_flags = network_flags | (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_PRECISION))
            self.network = self.builder.create_network(network_flags)
            self.parser = trt.OnnxParser(self.network, self.trt_logger)
            model_path = os.path.realpath(model_path)
            with open(model_path, "rb") as f:
                if not self.parser.parse(f.read()):
                    logger.error("Failed to load ONNX file: %s", model_path)
                    for error in range(self.parser.num_errors):
                        logger.error(self.parser.get_error(error))
                    sys.exit(1)
            inputs = [self.network.get_input(i) for i in range(self.network.num_inputs)]
            outputs = [self.network.get_output(i) for i in range(self.network.num_outputs)]
            logger.info("Network Description")
            for input in inputs:  # noqa pylint: disable=W0622
                logger.info("Input '%s' with shape %s and dtype %s", input.name, input.shape, input.dtype)
            for output in outputs:
                logger.info("Output '%s' with shape %s and dtype %s", output.name, output.shape, output.dtype)
            if self.batch_size <= 0:  # dynamic batch size
                logger.info("dynamic batch size handling")
                # Build a min/opt/max optimization profile over the batch dim.
                opt_profile = self.builder.create_optimization_profile()
                model_input = self.network.get_input(0)
                input_shape = model_input.shape
                input_name = model_input.name
                real_shape_min = (self.min_batch_size, input_shape[1],
                                  input_shape[2], input_shape[3])
                real_shape_opt = (self.opt_batch_size, input_shape[1],
                                  input_shape[2], input_shape[3])
                real_shape_max = (self.max_batch_size, input_shape[1],
                                  input_shape[2], input_shape[3])
                opt_profile.set_shape(input=input_name,
                                      min=real_shape_min,
                                      opt=real_shape_opt,
                                      max=real_shape_max)
                self.config.add_optimization_profile(opt_profile)
            else:
                self.builder.max_batch_size = self.max_batch_size
        else:
            logger.info("Parsing UFF model")
            # Implicit-batch network for the deprecated UFF path.
            self.network = self.builder.create_network()
            self.parser = trt.UffParser()
            self.set_input_output_node_names()
            in_tensor_name = self._input_node_names[0]
            self._input_dims = self.get_uff_input_dims(model_path)
            input_dict = {in_tensor_name: self._input_dims}
            for key, value in input_dict.items():
                if self._data_format == "channels_first":
                    self.parser.register_input(key, value, trt.UffInputOrder(0))
                else:
                    self.parser.register_input(key, value, trt.UffInputOrder(1))
            for name in self._output_node_names:
                self.parser.register_output(name)
            self.builder.max_batch_size = self.max_batch_size
            try:
                assert self.parser.parse(model_path, self.network, trt.DataType.FLOAT)
            except AssertionError as e:
                logger.error("Failed to parse UFF File")
                _, _, tb = sys.exc_info()
                traceback.print_tb(tb)  # Fixed format
                tb_info = traceback.extract_tb(tb)
                _, line, _, text = tb_info[-1]
                raise AssertionError(
                    f"UFF parsing failed on line {line} in statement {text}"
                ) from e

    def create_engine(self, engine_path, precision,
                      calib_input=None, calib_cache=None, calib_num_images=5000,
                      calib_batch_size=8, calib_data_file=None):
        """Build the TensorRT engine and serialize it to disk.

        Args:
            engine_path: The path where to serialize the engine to.
            precision: The datatype to use for the engine, either 'fp32', 'fp16' or 'int8'.
            calib_input: The path to a directory holding the calibration images.
            calib_cache: The path where to write the calibration cache to,
                or if it already exists, load it from.
            calib_num_images: The maximum number of images to use for calibration.
            calib_batch_size: The batch size to use for the calibration process.
            calib_data_file: Path to the TensorFile used for int8 calibration.
        """
        engine_path = os.path.realpath(engine_path)
        engine_dir = os.path.dirname(engine_path)
        os.makedirs(engine_dir, exist_ok=True)
        logger.debug("Building %s Engine in %s", precision, engine_path)
        inputs = [self.network.get_input(i) for i in range(self.network.num_inputs)]
        if self.batch_size is None:
            self.batch_size = calib_batch_size
            self.builder.max_batch_size = self.batch_size
        if precision.lower() == "fp16":
            if not self.builder.platform_has_fast_fp16:
                logger.warning("FP16 is not supported natively on this platform/device")
            else:
                self.config.set_flag(trt.BuilderFlag.FP16)
        elif precision.lower() == "int8":
            if not self.builder.platform_has_fast_int8:
                logger.warning("INT8 is not supported natively on this platform/device")
            elif self._is_qat:
                # Only applicable in TF2.
                # TF2 embeds QAT scales into the ONNX directly.
                # Hence, no need to set dynamic range of tensors.
                self.config.set_flag(trt.BuilderFlag.INT8)
            else:
                if self.builder.platform_has_fast_fp16 and not self._strict_type:
                    # Also enable fp16, as some layers may be even more efficient in fp16 than int8
                    self.config.set_flag(trt.BuilderFlag.FP16)
                else:
                    self.config.set_flag(trt.BuilderFlag.STRICT_TYPES)
                self.config.set_flag(trt.BuilderFlag.INT8)
                # Set Tensorfile based calibrator
                self.set_calibrator(inputs=inputs,
                                    calib_cache=calib_cache,
                                    calib_input=calib_input,
                                    calib_num_images=calib_num_images,
                                    calib_batch_size=calib_batch_size,
                                    calib_data_file=calib_data_file,
                                    image_mean=self.image_mean)
        with self.builder.build_engine(self.network, self.config) as engine, \
                open(engine_path, "wb") as f:
            logger.debug("Serializing engine to file: %s", engine_path)
            f.write(engine.serialize())

    def set_calibrator(self,
                       inputs=None,
                       calib_cache=None,
                       calib_input=None,
                       calib_num_images=5000,
                       calib_batch_size=8,
                       calib_data_file=None,
                       image_mean=None):
        """Simple function to set an Tensorfile based int8 calibrator.

        Args:
            inputs: network input tensors (unused here; kept for interface parity).
            calib_data_file: Path to the TensorFile. If the tensorfile doesn't exist
                at this path, then one is created with either n_batches
                of random tensors, images from the file in calib_input of dimensions
                (batch_size,) + (input_dims).
            calib_input: The path to a directory holding the calibration images.
            calib_cache: The path where to write the calibration cache to,
                or if it already exists, load it from.
            calib_num_images: The maximum number of images to use for calibration.
            calib_batch_size: The batch size to use for the calibration process.
            image_mean: Image mean per channel.

        Returns:
            No explicit returns.
        """
        logger.info("Calibrating using TensorfileCalibrator")
        n_batches = calib_num_images // calib_batch_size
        if not os.path.exists(calib_data_file):
            self.generate_tensor_file(calib_data_file,
                                      calib_input,
                                      self._input_dims,
                                      n_batches=n_batches,
                                      batch_size=calib_batch_size,
                                      image_mean=image_mean)
        self.config.int8_calibrator = TensorfileCalibrator(calib_data_file,
                                                           calib_cache,
                                                           n_batches,
                                                           calib_batch_size)

    def generate_tensor_file(self, data_file_name,
                             calibration_images_dir,
                             input_dims, n_batches=10,
                             batch_size=1, image_mean=None):
        """Generate calibration Tensorfile for int8 calibrator.

        This function generates a calibration tensorfile from a directory of images, or dumps
        n_batches of random numpy arrays of shape (batch_size,) + (input_dims).

        Args:
            data_file_name (str): Path to the output tensorfile to be saved.
            calibration_images_dir (str): Path to the images to generate a tensorfile from.
            input_dims (list): Input shape in CHW order.
            n_batches (int): Number of batches to be saved.
            batch_size (int): Number of images per batch.
            image_mean (list): Image mean per channel.

        Returns:
            No explicit returns.
        """
        if not os.path.exists(calibration_images_dir):
            logger.info("Generating a tensorfile with random tensor images. This may work well as "
                        "a profiling tool, however, it may result in inaccurate results at "
                        "inference. Please generate a tensorfile using the tlt-int8-tensorfile, "
                        "or provide a custom directory of images for best performance.")
            generate_random_tensorfile(data_file_name,
                                       input_dims,
                                       n_batches=n_batches,
                                       batch_size=batch_size)
        else:
            # Preparing the list of images to be saved.
            num_images = n_batches * batch_size
            # classification has sub-directory structure where each directory is a single class
            image_list = [p.resolve() for p in Path(calibration_images_dir).glob("**/*") if p.suffix in VALID_IMAGE_EXTENSIONS]
            if len(image_list) < num_images:
                raise ValueError('Not enough number of images provided:'
                                 f' {len(image_list)} < {num_images}')
            # Sample without replacement so each calibration image is unique.
            image_idx = random.sample(xrange(len(image_list)), num_images)
            self.set_data_preprocessing_parameters(input_dims, image_mean)
            if self._data_format == "channels_first":
                channels, image_height, image_width = input_dims[0], input_dims[1], input_dims[2]
            else:
                channels, image_height, image_width = input_dims[2], input_dims[0], input_dims[1]
            # Writing out processed dump.
            with TensorFile(data_file_name, 'w') as f:
                for chunk in tqdm(image_idx[x:x + batch_size] for x in xrange(0, len(image_idx),
                                                                              batch_size)):
                    dump_data = prepare_chunk(chunk, image_list,
                                              image_width=image_width,
                                              image_height=image_height,
                                              channels=channels,
                                              batch_size=batch_size,
                                              **self.preprocessing_arguments)
                    f.write(dump_data)
            # NOTE(review): `f.closed` is a no-op attribute access, not a call —
            # the `with` block above already closed the file.
            f.closed

    def set_data_preprocessing_parameters(self, input_dims, image_mean=None):
        """Set data pre-processing parameters for the int8 calibration.

        Populates ``self.preprocessing_arguments`` (scale, per-channel means,
        channel flip) based on ``self.preprocess_mode`` and the channel count.
        """
        if self.preprocess_mode == "torch":
            default_mean = [123.675, 116.280, 103.53]
            default_scale = 0.017507
        else:
            # "caffe" mode: BGR means, no scaling.
            default_mean = [103.939, 116.779, 123.68]
            default_scale = 1.0
        if self._data_format == "channels_first":
            num_channels = input_dims[0]
        else:
            num_channels = input_dims[-1]
        if num_channels == 3:
            if not image_mean:
                means = default_mean
            else:
                assert len(image_mean) == 3, "Image mean should have 3 values for RGB inputs."
                means = image_mean
        elif num_channels == 1:
            if not image_mean:
                means = [117.3786]
            else:
                assert len(image_mean) == 1, "Image mean should have 1 value for grayscale inputs."
                means = image_mean
        else:
            raise NotImplementedError(
                f"Invalid number of dimensions {num_channels}.")
        self.preprocessing_arguments = {"scale": default_scale,
                                        "means": means,
                                        "flip_channel": True}
| tao_deploy-main | nvidia_tao_deploy/cv/classification_tf1/engine_builder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy TF1 Classification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_deploy-main | nvidia_tao_deploy/cv/classification_tf1/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classification loader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from pathlib import Path
from abc import ABC
import numpy as np
from PIL import Image
from nvidia_tao_deploy.cv.common.constants import VALID_IMAGE_EXTENSIONS
from nvidia_tao_deploy.inferencer.preprocess_input import preprocess_input
# padding size.
# We firstly resize to (target_width + CROP_PADDING, target_height + CROP_PADDING)
# , then crop to (target_width, target_height).
# for standard ImageNet size: 224x224 the ratio is 0.875(224 / (224 + 32)).
# but for EfficientNet B1-B7, larger resolution is used, hence this ratio
# is no longer 0.875
# ref:
# https://github.com/tensorflow/tpu/blob/r1.15/models/official/efficientnet/preprocessing.py#L110
CROP_PADDING = 32
# Mapping from config-file interpolation names to PIL resampling filters.
_PIL_INTERPOLATION_METHODS = {
    'nearest': Image.NEAREST,
    'bilinear': Image.BILINEAR,
    'bicubic': Image.BICUBIC,
}
class ClassificationLoader(ABC):
"""Classification Dataloader."""
def __init__(self,
shape,
image_dirs,
class_mapping,
is_inference=False,
batch_size=10,
data_format="channels_first",
interpolation_method="bicubic",
mode="caffe",
crop="center",
image_mean=None,
image_std=None,
image_depth=8,
dtype=None):
"""Init.
Args:
shape (list): list of input dimension that is either (c, h, w) or (h, w, c) format.
image_dirs (list): list of image directories.
label_dirs (list): list of label directories.
class_mapping (dict): class mapping. e.g. {'aeroplane': 0, 'car': 1}
is_inference (bool): If set true, we do not load labels (Default: False)
interpolation_method (str): Bilinear / Bicubic.
mode (str): caffe / torch
crop (str): random / center
batch_size (int): size of the batch.
image_mean (list): image mean used for preprocessing.
image_std (list): image std used for preprocessing.
image_depth(int): Bit depth of images(8 or 16).
dtype (str): data type to cast to
"""
self.image_paths = []
self.is_inference = is_inference
self._add_source(image_dirs[0]) # WARNING(@yuw): hardcoded 0
self.image_paths = np.array(self.image_paths)
self.data_inds = np.arange(len(self.image_paths))
self.class_mapping = class_mapping
self.resample = _PIL_INTERPOLATION_METHODS[interpolation_method]
self.mode = mode
self.crop = crop
self.data_format = data_format
if data_format == "channels_first":
self.num_channels, self.height, self.width = shape
else:
self.height, self.width, self.num_channels = shape
self.image_depth = image_depth
self.batch_size = batch_size
self.image_mean = image_mean
self.image_std = image_std
self.n_samples = len(self.data_inds)
self.dtype = dtype
self.n_batches = int(len(self.image_paths) // self.batch_size)
assert self.n_batches > 0, "empty image dir or batch size too large!"
self.model_img_mode = 'rgb' if self.num_channels == 3 else 'grayscale'
def _add_source(self, image_folder):
"""Add classification sources."""
images = [p.resolve() for p in Path(image_folder).glob("**/*") if p.suffix in VALID_IMAGE_EXTENSIONS]
images = sorted(images)
self.image_paths = images
def __len__(self):
"""Get length of Sequence."""
return self.n_batches
def _load_gt_image(self, image_path):
"""Load GT image from file."""
img = Image.open(image_path)
if self.num_channels == 3:
img = img.convert('RGB') # Color Image
else:
if self.image_depth == 16:
img = img.convert('I') # PIL int32 mode for 16-bit images
else:
img = img.convert('L') # Grayscale Image
return img
def __iter__(self):
"""Iterate."""
self.n = 0
return self
def __next__(self):
    """Assemble and return the next full batch of (images, labels).

    Raises:
        StopIteration: once all ``n_batches`` batches have been served.
    """
    if self.n >= self.n_batches:
        raise StopIteration
    start = self.n * self.batch_size
    images, labels = [], []
    for idx in range(start, start + self.batch_size):
        img, lbl = self._get_single_processed_item(idx)
        images.append(img)
        labels.append(lbl)
    self.n += 1
    return self._batch_post_processing(images, labels)
def _batch_post_processing(self, images, labels):
    """Stack a finished batch.

    Images are always stacked into one ndarray. Labels are stacked only
    when every label is already an ndarray and all share one shape;
    otherwise the label list is returned unchanged.
    """
    images = np.array(images)
    all_arrays = all(isinstance(lbl, np.ndarray) for lbl in labels)
    # At most one distinct shape means the labels stack cleanly.
    if all_arrays and len({lbl.shape for lbl in labels}) <= 1:
        labels = np.array(labels)
    return images, labels
def _get_single_processed_item(self, idx):
    """Return one preprocessed (image, label) sample by index."""
    raw_image, label = self._get_single_item_raw(idx)
    return self.preprocessing(raw_image), label
def _get_single_item_raw(self, idx):
    """Load a single raw sample.

    Returns:
        image (PIL.Image): image object at its original resolution.
        label (int): integer class index looked up from the image's
            parent-directory name via ``class_mapping``, or -1 when
            running inference (no ground-truth labels available).
    """
    real_idx = self.data_inds[idx]
    image = self._load_gt_image(self.image_paths[real_idx])
    if self.is_inference:
        return image, -1
    # The class name is encoded as the image's parent folder name.
    parent_dir = os.path.dirname(self.image_paths[real_idx])
    label = self.class_mapping[os.path.basename(parent_dir)]
    return image, label
def preprocessing(self, image):
    """Prepare a decoded image for batching.

    Performs resize (optionally aspect-preserving resize + center crop),
    dtype casting, channel transposition for channels-first layouts, and
    mean/std normalization via ``preprocess_input``.

    Args:
        image (PIL.Image): the decoded input image.

    Returns:
        np.ndarray: preprocessed sample ready to be concatenated into
        the rest of the batch.
    """
    src_w, src_h = image.size
    if self.crop == 'center':
        # Resize while keeping the aspect ratio so the result is no
        # smaller than the target size plus the crop-padding overhead,
        # then cut the target window out of the middle.
        min_w = self.width + CROP_PADDING
        min_h = self.height + CROP_PADDING
        scale = max(min_w / src_w, min_h / src_h)
        scaled_size = (int(src_w * scale), int(src_h * scale))
        image = image.resize(scaled_size, resample=self.resample)
        new_w, new_h = image.size
        left = int(round(new_w / 2)) - int(round(self.width / 2))
        top = int(round(new_h / 2)) - int(round(self.height / 2))
        image = image.crop(
            (left, top, left + self.width, top + self.height))
    else:
        image = image.resize((self.width, self.height), self.resample)
    image = np.asarray(image, dtype=self.dtype)
    if self.data_format == "channels_first":
        # Grayscale images decode as HxW; add the channel axis before
        # transposing to CHW.
        if image.ndim == 2 and self.model_img_mode == 'grayscale':
            image = np.expand_dims(image, axis=2)
        image = np.transpose(image, (2, 0, 1))
    # Normalize and apply image mean / std.
    return preprocess_input(image,
                            data_format=self.data_format,
                            img_mean=self.image_mean,
                            img_std=self.image_std,
                            img_depth=self.image_depth,
                            mode=self.mode,
                            color_mode=self.model_img_mode)
| tao_deploy-main | nvidia_tao_deploy/cv/classification_tf1/dataloader.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/classification_tf1/proto/optimizer_config.proto
# NOTE(review): machine-generated descriptor module for the classification
# optimizer config (SGD / Adam / RMSprop messages inside an 'optim' oneof).
# Regenerate from the .proto file instead of hand-editing.
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_deploy/cv/classification_tf1/proto/optimizer_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\nDnvidia_tao_deploy/cv/classification_tf1/proto/optimizer_config.proto\"S\n\x12SgdOptimizerConfig\x12\n\n\x02lr\x18\x01 \x01(\x02\x12\r\n\x05\x64\x65\x63\x61y\x18\x02 \x01(\x02\x12\x10\n\x08momentum\x18\x03 \x01(\x02\x12\x10\n\x08nesterov\x18\x04 \x01(\x08\"a\n\x13\x41\x64\x61mOptimizerConfig\x12\n\n\x02lr\x18\x01 \x01(\x02\x12\x0e\n\x06\x62\x65ta_1\x18\x02 \x01(\x02\x12\x0e\n\x06\x62\x65ta_2\x18\x03 \x01(\x02\x12\x0f\n\x07\x65psilon\x18\x04 \x01(\x02\x12\r\n\x05\x64\x65\x63\x61y\x18\x05 \x01(\x02\"Q\n\x16RmspropOptimizerConfig\x12\n\n\x02lr\x18\x01 \x01(\x02\x12\x0b\n\x03rho\x18\x02 \x01(\x02\x12\x0f\n\x07\x65psilon\x18\x03 \x01(\x02\x12\r\n\x05\x64\x65\x63\x61y\x18\x04 \x01(\x02\"\x90\x01\n\x0fOptimizerConfig\x12\"\n\x03sgd\x18\x01 \x01(\x0b\x32\x13.SgdOptimizerConfigH\x00\x12$\n\x04\x61\x64\x61m\x18\x02 \x01(\x0b\x32\x14.AdamOptimizerConfigH\x00\x12*\n\x07rmsprop\x18\x03 \x01(\x0b\x32\x17.RmspropOptimizerConfigH\x00\x42\x07\n\x05optimb\x06proto3')
)
# Descriptor for SgdOptimizerConfig (fields: lr, decay, momentum, nesterov).
_SGDOPTIMIZERCONFIG = _descriptor.Descriptor(
name='SgdOptimizerConfig',
full_name='SgdOptimizerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='lr', full_name='SgdOptimizerConfig.lr', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='decay', full_name='SgdOptimizerConfig.decay', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='momentum', full_name='SgdOptimizerConfig.momentum', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nesterov', full_name='SgdOptimizerConfig.nesterov', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=72,
serialized_end=155,
)
# Descriptor for AdamOptimizerConfig (fields: lr, beta_1, beta_2, epsilon, decay).
_ADAMOPTIMIZERCONFIG = _descriptor.Descriptor(
name='AdamOptimizerConfig',
full_name='AdamOptimizerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='lr', full_name='AdamOptimizerConfig.lr', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='beta_1', full_name='AdamOptimizerConfig.beta_1', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='beta_2', full_name='AdamOptimizerConfig.beta_2', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='epsilon', full_name='AdamOptimizerConfig.epsilon', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='decay', full_name='AdamOptimizerConfig.decay', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=157,
serialized_end=254,
)
# Descriptor for RmspropOptimizerConfig (fields: lr, rho, epsilon, decay).
_RMSPROPOPTIMIZERCONFIG = _descriptor.Descriptor(
name='RmspropOptimizerConfig',
full_name='RmspropOptimizerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='lr', full_name='RmspropOptimizerConfig.lr', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rho', full_name='RmspropOptimizerConfig.rho', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='epsilon', full_name='RmspropOptimizerConfig.epsilon', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='decay', full_name='RmspropOptimizerConfig.decay', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=256,
serialized_end=337,
)
# Descriptor for the top-level OptimizerConfig wrapper: exactly one of
# sgd / adam / rmsprop may be set (the 'optim' oneof).
_OPTIMIZERCONFIG = _descriptor.Descriptor(
name='OptimizerConfig',
full_name='OptimizerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='sgd', full_name='OptimizerConfig.sgd', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='adam', full_name='OptimizerConfig.adam', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rmsprop', full_name='OptimizerConfig.rmsprop', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='optim', full_name='OptimizerConfig.optim',
index=0, containing_type=None, fields=[]),
],
serialized_start=340,
serialized_end=484,
)
# Wire up message-typed fields and the 'optim' oneof membership.
_OPTIMIZERCONFIG.fields_by_name['sgd'].message_type = _SGDOPTIMIZERCONFIG
_OPTIMIZERCONFIG.fields_by_name['adam'].message_type = _ADAMOPTIMIZERCONFIG
_OPTIMIZERCONFIG.fields_by_name['rmsprop'].message_type = _RMSPROPOPTIMIZERCONFIG
_OPTIMIZERCONFIG.oneofs_by_name['optim'].fields.append(
_OPTIMIZERCONFIG.fields_by_name['sgd'])
_OPTIMIZERCONFIG.fields_by_name['sgd'].containing_oneof = _OPTIMIZERCONFIG.oneofs_by_name['optim']
_OPTIMIZERCONFIG.oneofs_by_name['optim'].fields.append(
_OPTIMIZERCONFIG.fields_by_name['adam'])
_OPTIMIZERCONFIG.fields_by_name['adam'].containing_oneof = _OPTIMIZERCONFIG.oneofs_by_name['optim']
_OPTIMIZERCONFIG.oneofs_by_name['optim'].fields.append(
_OPTIMIZERCONFIG.fields_by_name['rmsprop'])
_OPTIMIZERCONFIG.fields_by_name['rmsprop'].containing_oneof = _OPTIMIZERCONFIG.oneofs_by_name['optim']
DESCRIPTOR.message_types_by_name['SgdOptimizerConfig'] = _SGDOPTIMIZERCONFIG
DESCRIPTOR.message_types_by_name['AdamOptimizerConfig'] = _ADAMOPTIMIZERCONFIG
DESCRIPTOR.message_types_by_name['RmspropOptimizerConfig'] = _RMSPROPOPTIMIZERCONFIG
DESCRIPTOR.message_types_by_name['OptimizerConfig'] = _OPTIMIZERCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete message classes created through the protobuf reflection machinery.
SgdOptimizerConfig = _reflection.GeneratedProtocolMessageType('SgdOptimizerConfig', (_message.Message,), dict(
DESCRIPTOR = _SGDOPTIMIZERCONFIG,
__module__ = 'nvidia_tao_deploy.cv.classification_tf1.proto.optimizer_config_pb2'
# @@protoc_insertion_point(class_scope:SgdOptimizerConfig)
))
_sym_db.RegisterMessage(SgdOptimizerConfig)
AdamOptimizerConfig = _reflection.GeneratedProtocolMessageType('AdamOptimizerConfig', (_message.Message,), dict(
DESCRIPTOR = _ADAMOPTIMIZERCONFIG,
__module__ = 'nvidia_tao_deploy.cv.classification_tf1.proto.optimizer_config_pb2'
# @@protoc_insertion_point(class_scope:AdamOptimizerConfig)
))
_sym_db.RegisterMessage(AdamOptimizerConfig)
RmspropOptimizerConfig = _reflection.GeneratedProtocolMessageType('RmspropOptimizerConfig', (_message.Message,), dict(
DESCRIPTOR = _RMSPROPOPTIMIZERCONFIG,
__module__ = 'nvidia_tao_deploy.cv.classification_tf1.proto.optimizer_config_pb2'
# @@protoc_insertion_point(class_scope:RmspropOptimizerConfig)
))
_sym_db.RegisterMessage(RmspropOptimizerConfig)
OptimizerConfig = _reflection.GeneratedProtocolMessageType('OptimizerConfig', (_message.Message,), dict(
DESCRIPTOR = _OPTIMIZERCONFIG,
__module__ = 'nvidia_tao_deploy.cv.classification_tf1.proto.optimizer_config_pb2'
# @@protoc_insertion_point(class_scope:OptimizerConfig)
))
_sym_db.RegisterMessage(OptimizerConfig)
# @@protoc_insertion_point(module_scope)
| tao_deploy-main | nvidia_tao_deploy/cv/classification_tf1/proto/optimizer_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/classification_tf1/proto/train_config.proto
# NOTE(review): machine-generated descriptor module for TrainConfig, which
# aggregates optimizer / regularizer / LR-schedule / visualizer sub-configs
# plus scalar training hyper-parameters and an image_mean map entry.
# Regenerate from the .proto file instead of hand-editing.
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_deploy.cv.classification_tf1.proto import visualizer_config_pb2 as nvidia__tao__deploy_dot_cv_dot_classification__tf1_dot_proto_dot_visualizer__config__pb2
from nvidia_tao_deploy.cv.classification_tf1.proto import lr_config_pb2 as nvidia__tao__deploy_dot_cv_dot_classification__tf1_dot_proto_dot_lr__config__pb2
from nvidia_tao_deploy.cv.classification_tf1.proto import optimizer_config_pb2 as nvidia__tao__deploy_dot_cv_dot_classification__tf1_dot_proto_dot_optimizer__config__pb2
from nvidia_tao_deploy.cv.classification_tf1.proto import regularizer_config_pb2 as nvidia__tao__deploy_dot_cv_dot_classification__tf1_dot_proto_dot_regularizer__config__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_deploy/cv/classification_tf1/proto/train_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n@nvidia_tao_deploy/cv/classification_tf1/proto/train_config.proto\x1a\x45nvidia_tao_deploy/cv/classification_tf1/proto/visualizer_config.proto\x1a=nvidia_tao_deploy/cv/classification_tf1/proto/lr_config.proto\x1a\x44nvidia_tao_deploy/cv/classification_tf1/proto/optimizer_config.proto\x1a\x46nvidia_tao_deploy/cv/classification_tf1/proto/regularizer_config.proto\"\x83\x05\n\x0bTrainConfig\x12\x1a\n\x12train_dataset_path\x18\x01 \x01(\t\x12\x18\n\x10val_dataset_path\x18\x02 \x01(\t\x12\x1d\n\x15pretrained_model_path\x18\x03 \x01(\t\x12#\n\toptimizer\x18\x04 \x01(\x0b\x32\x10.OptimizerConfig\x12\x1a\n\x12\x62\x61tch_size_per_gpu\x18\x05 \x01(\r\x12\x10\n\x08n_epochs\x18\x06 \x01(\r\x12\x11\n\tn_workers\x18\x07 \x01(\r\x12\x1e\n\nreg_config\x18\x08 \x01(\x0b\x32\n.RegConfig\x12\x1c\n\tlr_config\x18\t \x01(\x0b\x32\t.LRConfig\x12\x13\n\x0brandom_seed\x18\n \x01(\r\x12\x1a\n\x12\x65nable_random_crop\x18\x0b \x01(\x08\x12\x1a\n\x12\x65nable_center_crop\x18\x0e \x01(\x08\x12!\n\x19\x65nable_color_augmentation\x18\x0f \x01(\x08\x12\x17\n\x0flabel_smoothing\x18\x0c \x01(\x02\x12\x17\n\x0fpreprocess_mode\x18\r \x01(\t\x12\x13\n\x0bmixup_alpha\x18\x10 \x01(\x02\x12\x19\n\x11model_parallelism\x18\x11 \x03(\x02\x12/\n\nimage_mean\x18\x12 \x03(\x0b\x32\x1b.TrainConfig.ImageMeanEntry\x12\x1f\n\x17\x64isable_horizontal_flip\x18\x13 \x01(\x08\x12%\n\nvisualizer\x18\x14 \x01(\x0b\x32\x11.VisualizerConfig\x1a\x30\n\x0eImageMeanEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x02:\x02\x38\x01\x62\x06proto3')
,
dependencies=[nvidia__tao__deploy_dot_cv_dot_classification__tf1_dot_proto_dot_visualizer__config__pb2.DESCRIPTOR,nvidia__tao__deploy_dot_cv_dot_classification__tf1_dot_proto_dot_lr__config__pb2.DESCRIPTOR,nvidia__tao__deploy_dot_cv_dot_classification__tf1_dot_proto_dot_optimizer__config__pb2.DESCRIPTOR,nvidia__tao__deploy_dot_cv_dot_classification__tf1_dot_proto_dot_regularizer__config__pb2.DESCRIPTOR,])
# Descriptor for the nested map-entry message backing TrainConfig.image_mean.
_TRAINCONFIG_IMAGEMEANENTRY = _descriptor.Descriptor(
name='ImageMeanEntry',
full_name='TrainConfig.ImageMeanEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='TrainConfig.ImageMeanEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='TrainConfig.ImageMeanEntry.value', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=940,
serialized_end=988,
)
# Descriptor for the top-level TrainConfig message.
_TRAINCONFIG = _descriptor.Descriptor(
name='TrainConfig',
full_name='TrainConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='train_dataset_path', full_name='TrainConfig.train_dataset_path', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='val_dataset_path', full_name='TrainConfig.val_dataset_path', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pretrained_model_path', full_name='TrainConfig.pretrained_model_path', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='optimizer', full_name='TrainConfig.optimizer', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batch_size_per_gpu', full_name='TrainConfig.batch_size_per_gpu', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='n_epochs', full_name='TrainConfig.n_epochs', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='n_workers', full_name='TrainConfig.n_workers', index=6,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reg_config', full_name='TrainConfig.reg_config', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lr_config', full_name='TrainConfig.lr_config', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='random_seed', full_name='TrainConfig.random_seed', index=9,
number=10, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_random_crop', full_name='TrainConfig.enable_random_crop', index=10,
number=11, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_center_crop', full_name='TrainConfig.enable_center_crop', index=11,
number=14, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_color_augmentation', full_name='TrainConfig.enable_color_augmentation', index=12,
number=15, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='label_smoothing', full_name='TrainConfig.label_smoothing', index=13,
number=12, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='preprocess_mode', full_name='TrainConfig.preprocess_mode', index=14,
number=13, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mixup_alpha', full_name='TrainConfig.mixup_alpha', index=15,
number=16, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='model_parallelism', full_name='TrainConfig.model_parallelism', index=16,
number=17, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_mean', full_name='TrainConfig.image_mean', index=17,
number=18, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='disable_horizontal_flip', full_name='TrainConfig.disable_horizontal_flip', index=18,
number=19, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='visualizer', full_name='TrainConfig.visualizer', index=19,
number=20, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_TRAINCONFIG_IMAGEMEANENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=345,
serialized_end=988,
)
# Wire up nested/map types and the message-typed fields imported from the
# sibling *_pb2 modules.
_TRAINCONFIG_IMAGEMEANENTRY.containing_type = _TRAINCONFIG
_TRAINCONFIG.fields_by_name['optimizer'].message_type = nvidia__tao__deploy_dot_cv_dot_classification__tf1_dot_proto_dot_optimizer__config__pb2._OPTIMIZERCONFIG
_TRAINCONFIG.fields_by_name['reg_config'].message_type = nvidia__tao__deploy_dot_cv_dot_classification__tf1_dot_proto_dot_regularizer__config__pb2._REGCONFIG
_TRAINCONFIG.fields_by_name['lr_config'].message_type = nvidia__tao__deploy_dot_cv_dot_classification__tf1_dot_proto_dot_lr__config__pb2._LRCONFIG
_TRAINCONFIG.fields_by_name['image_mean'].message_type = _TRAINCONFIG_IMAGEMEANENTRY
_TRAINCONFIG.fields_by_name['visualizer'].message_type = nvidia__tao__deploy_dot_cv_dot_classification__tf1_dot_proto_dot_visualizer__config__pb2._VISUALIZERCONFIG
DESCRIPTOR.message_types_by_name['TrainConfig'] = _TRAINCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete message classes created through the protobuf reflection machinery.
TrainConfig = _reflection.GeneratedProtocolMessageType('TrainConfig', (_message.Message,), dict(
ImageMeanEntry = _reflection.GeneratedProtocolMessageType('ImageMeanEntry', (_message.Message,), dict(
DESCRIPTOR = _TRAINCONFIG_IMAGEMEANENTRY,
__module__ = 'nvidia_tao_deploy.cv.classification_tf1.proto.train_config_pb2'
# @@protoc_insertion_point(class_scope:TrainConfig.ImageMeanEntry)
))
,
DESCRIPTOR = _TRAINCONFIG,
__module__ = 'nvidia_tao_deploy.cv.classification_tf1.proto.train_config_pb2'
# @@protoc_insertion_point(class_scope:TrainConfig)
))
_sym_db.RegisterMessage(TrainConfig)
_sym_db.RegisterMessage(TrainConfig.ImageMeanEntry)
_TRAINCONFIG_IMAGEMEANENTRY._options = None
# @@protoc_insertion_point(module_scope)
| tao_deploy-main | nvidia_tao_deploy/cv/classification_tf1/proto/train_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/classification_tf1/proto/regularizer_config.proto
# NOTE(review): machine-generated descriptor module for RegConfig
# (fields: type, scope, weight_decay). Regenerate from the .proto file
# instead of hand-editing.
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_deploy/cv/classification_tf1/proto/regularizer_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\nFnvidia_tao_deploy/cv/classification_tf1/proto/regularizer_config.proto\">\n\tRegConfig\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\r\n\x05scope\x18\x02 \x01(\t\x12\x14\n\x0cweight_decay\x18\x03 \x01(\x02\x62\x06proto3')
)
# Descriptor for the RegConfig message.
_REGCONFIG = _descriptor.Descriptor(
name='RegConfig',
full_name='RegConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='RegConfig.type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scope', full_name='RegConfig.scope', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='weight_decay', full_name='RegConfig.weight_decay', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=74,
serialized_end=136,
)
DESCRIPTOR.message_types_by_name['RegConfig'] = _REGCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete message class created through the protobuf reflection machinery.
RegConfig = _reflection.GeneratedProtocolMessageType('RegConfig', (_message.Message,), dict(
DESCRIPTOR = _REGCONFIG,
__module__ = 'nvidia_tao_deploy.cv.classification_tf1.proto.regularizer_config_pb2'
# @@protoc_insertion_point(class_scope:RegConfig)
))
_sym_db.RegisterMessage(RegConfig)
# @@protoc_insertion_point(module_scope)
| tao_deploy-main | nvidia_tao_deploy/cv/classification_tf1/proto/regularizer_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/classification_tf1/proto/lr_config.proto
# NOTE(review): protoc-generated descriptor module for the learning-rate
# schedule config (StepLrConfig / SoftAnnealLrConfig / CosineLrConfig / LRConfig).
# Regenerate from the .proto instead of editing this file by hand.
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# File-level descriptor embedding the serialized lr_config.proto definition.
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_deploy/cv/classification_tf1/proto/lr_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n=nvidia_tao_deploy/cv/classification_tf1/proto/lr_config.proto\"G\n\x0cStepLrConfig\x12\x15\n\rlearning_rate\x18\x01 \x01(\x02\x12\x11\n\tstep_size\x18\x02 \x01(\r\x12\r\n\x05gamma\x18\x03 \x01(\x02\"t\n\x12SoftAnnealLrConfig\x12\x15\n\rlearning_rate\x18\x01 \x01(\x02\x12\x12\n\nsoft_start\x18\x02 \x01(\x02\x12\x19\n\x11\x61nnealing_divider\x18\x03 \x01(\x02\x12\x18\n\x10\x61nnealing_points\x18\x07 \x03(\x02\"Q\n\x0e\x43osineLrConfig\x12\x15\n\rlearning_rate\x18\x01 \x01(\x02\x12\x14\n\x0cmin_lr_ratio\x18\x02 \x01(\x02\x12\x12\n\nsoft_start\x18\x03 \x01(\x02\"\x88\x01\n\x08LRConfig\x12\x1d\n\x04step\x18\x01 \x01(\x0b\x32\r.StepLrConfigH\x00\x12*\n\x0bsoft_anneal\x18\x02 \x01(\x0b\x32\x13.SoftAnnealLrConfigH\x00\x12!\n\x06\x63osine\x18\x03 \x01(\x0b\x32\x0f.CosineLrConfigH\x00\x42\x0e\n\x0clr_schedulerb\x06proto3')
)
# Descriptor for StepLrConfig: learning_rate (float), step_size (uint32), gamma (float).
_STEPLRCONFIG = _descriptor.Descriptor(
name='StepLrConfig',
full_name='StepLrConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='learning_rate', full_name='StepLrConfig.learning_rate', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='step_size', full_name='StepLrConfig.step_size', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gamma', full_name='StepLrConfig.gamma', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=65,
serialized_end=136,
)
# Descriptor for SoftAnnealLrConfig: learning_rate, soft_start,
# annealing_divider (floats) and repeated annealing_points.
_SOFTANNEALLRCONFIG = _descriptor.Descriptor(
name='SoftAnnealLrConfig',
full_name='SoftAnnealLrConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='learning_rate', full_name='SoftAnnealLrConfig.learning_rate', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='soft_start', full_name='SoftAnnealLrConfig.soft_start', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='annealing_divider', full_name='SoftAnnealLrConfig.annealing_divider', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='annealing_points', full_name='SoftAnnealLrConfig.annealing_points', index=3,
number=7, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=138,
serialized_end=254,
)
# Descriptor for CosineLrConfig: learning_rate, min_lr_ratio, soft_start (floats).
_COSINELRCONFIG = _descriptor.Descriptor(
name='CosineLrConfig',
full_name='CosineLrConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='learning_rate', full_name='CosineLrConfig.learning_rate', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_lr_ratio', full_name='CosineLrConfig.min_lr_ratio', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='soft_start', full_name='CosineLrConfig.soft_start', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=256,
serialized_end=337,
)
# Descriptor for LRConfig: a oneof 'lr_scheduler' selecting exactly one of
# step / soft_anneal / cosine message fields.
_LRCONFIG = _descriptor.Descriptor(
name='LRConfig',
full_name='LRConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='step', full_name='LRConfig.step', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='soft_anneal', full_name='LRConfig.soft_anneal', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cosine', full_name='LRConfig.cosine', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='lr_scheduler', full_name='LRConfig.lr_scheduler',
index=0, containing_type=None, fields=[]),
],
serialized_start=340,
serialized_end=476,
)
# Resolve message-typed fields and attach each field to the lr_scheduler oneof.
_LRCONFIG.fields_by_name['step'].message_type = _STEPLRCONFIG
_LRCONFIG.fields_by_name['soft_anneal'].message_type = _SOFTANNEALLRCONFIG
_LRCONFIG.fields_by_name['cosine'].message_type = _COSINELRCONFIG
_LRCONFIG.oneofs_by_name['lr_scheduler'].fields.append(
_LRCONFIG.fields_by_name['step'])
_LRCONFIG.fields_by_name['step'].containing_oneof = _LRCONFIG.oneofs_by_name['lr_scheduler']
_LRCONFIG.oneofs_by_name['lr_scheduler'].fields.append(
_LRCONFIG.fields_by_name['soft_anneal'])
_LRCONFIG.fields_by_name['soft_anneal'].containing_oneof = _LRCONFIG.oneofs_by_name['lr_scheduler']
_LRCONFIG.oneofs_by_name['lr_scheduler'].fields.append(
_LRCONFIG.fields_by_name['cosine'])
_LRCONFIG.fields_by_name['cosine'].containing_oneof = _LRCONFIG.oneofs_by_name['lr_scheduler']
# Register descriptors and synthesize the generated message classes.
DESCRIPTOR.message_types_by_name['StepLrConfig'] = _STEPLRCONFIG
DESCRIPTOR.message_types_by_name['SoftAnnealLrConfig'] = _SOFTANNEALLRCONFIG
DESCRIPTOR.message_types_by_name['CosineLrConfig'] = _COSINELRCONFIG
DESCRIPTOR.message_types_by_name['LRConfig'] = _LRCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
StepLrConfig = _reflection.GeneratedProtocolMessageType('StepLrConfig', (_message.Message,), dict(
DESCRIPTOR = _STEPLRCONFIG,
__module__ = 'nvidia_tao_deploy.cv.classification_tf1.proto.lr_config_pb2'
# @@protoc_insertion_point(class_scope:StepLrConfig)
))
_sym_db.RegisterMessage(StepLrConfig)
SoftAnnealLrConfig = _reflection.GeneratedProtocolMessageType('SoftAnnealLrConfig', (_message.Message,), dict(
DESCRIPTOR = _SOFTANNEALLRCONFIG,
__module__ = 'nvidia_tao_deploy.cv.classification_tf1.proto.lr_config_pb2'
# @@protoc_insertion_point(class_scope:SoftAnnealLrConfig)
))
_sym_db.RegisterMessage(SoftAnnealLrConfig)
CosineLrConfig = _reflection.GeneratedProtocolMessageType('CosineLrConfig', (_message.Message,), dict(
DESCRIPTOR = _COSINELRCONFIG,
__module__ = 'nvidia_tao_deploy.cv.classification_tf1.proto.lr_config_pb2'
# @@protoc_insertion_point(class_scope:CosineLrConfig)
))
_sym_db.RegisterMessage(CosineLrConfig)
LRConfig = _reflection.GeneratedProtocolMessageType('LRConfig', (_message.Message,), dict(
DESCRIPTOR = _LRCONFIG,
__module__ = 'nvidia_tao_deploy.cv.classification_tf1.proto.lr_config_pb2'
# @@protoc_insertion_point(class_scope:LRConfig)
))
_sym_db.RegisterMessage(LRConfig)
# @@protoc_insertion_point(module_scope)
| tao_deploy-main | nvidia_tao_deploy/cv/classification_tf1/proto/lr_config_pb2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy Classification Proto."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_deploy-main | nvidia_tao_deploy/cv/classification_tf1/proto/__init__.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/classification_tf1/proto/visualizer_config.proto
# NOTE(review): protoc-generated descriptor module for VisualizerConfig.
# Regenerate from the .proto instead of editing this file by hand.
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# File-level descriptor embedding the serialized visualizer_config.proto definition.
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_deploy/cv/classification_tf1/proto/visualizer_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\nEnvidia_tao_deploy/cv/classification_tf1/proto/visualizer_config.proto\"R\n\x10VisualizerConfig\x12\x0f\n\x07\x65nabled\x18\x01 \x01(\x08\x12\x12\n\nnum_images\x18\x02 \x01(\r\x12\x19\n\x11weight_histograms\x18\x03 \x01(\x08\x62\x06proto3')
)
# Descriptor for VisualizerConfig: enabled (bool), num_images (uint32),
# weight_histograms (bool).
_VISUALIZERCONFIG = _descriptor.Descriptor(
name='VisualizerConfig',
full_name='VisualizerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='enabled', full_name='VisualizerConfig.enabled', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_images', full_name='VisualizerConfig.num_images', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='weight_histograms', full_name='VisualizerConfig.weight_histograms', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=73,
serialized_end=155,
)
# Register the descriptor and synthesize the VisualizerConfig message class.
DESCRIPTOR.message_types_by_name['VisualizerConfig'] = _VISUALIZERCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
VisualizerConfig = _reflection.GeneratedProtocolMessageType('VisualizerConfig', (_message.Message,), dict(
DESCRIPTOR = _VISUALIZERCONFIG,
__module__ = 'nvidia_tao_deploy.cv.classification_tf1.proto.visualizer_config_pb2'
# @@protoc_insertion_point(class_scope:VisualizerConfig)
))
_sym_db.RegisterMessage(VisualizerConfig)
# @@protoc_insertion_point(module_scope)
| tao_deploy-main | nvidia_tao_deploy/cv/classification_tf1/proto/visualizer_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/classification_tf1/proto/experiment.proto
# NOTE(review): protoc-generated descriptor module for the top-level Experiment
# message (eval_config + model_config + train_config). Regenerate from the
# .proto instead of editing this file by hand.
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# Dependent generated modules whose descriptors this file references.
from nvidia_tao_deploy.cv.classification_tf1.proto import model_config_pb2 as nvidia__tao__deploy_dot_cv_dot_classification__tf1_dot_proto_dot_model__config__pb2
from nvidia_tao_deploy.cv.classification_tf1.proto import eval_config_pb2 as nvidia__tao__deploy_dot_cv_dot_classification__tf1_dot_proto_dot_eval__config__pb2
from nvidia_tao_deploy.cv.classification_tf1.proto import train_config_pb2 as nvidia__tao__deploy_dot_cv_dot_classification__tf1_dot_proto_dot_train__config__pb2
# File-level descriptor embedding the serialized experiment.proto definition.
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_deploy/cv/classification_tf1/proto/experiment.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n>nvidia_tao_deploy/cv/classification_tf1/proto/experiment.proto\x1a@nvidia_tao_deploy/cv/classification_tf1/proto/model_config.proto\x1a?nvidia_tao_deploy/cv/classification_tf1/proto/eval_config.proto\x1a@nvidia_tao_deploy/cv/classification_tf1/proto/train_config.proto\"v\n\nExperiment\x12 \n\x0b\x65val_config\x18\x01 \x01(\x0b\x32\x0b.EvalConfig\x12\"\n\x0cmodel_config\x18\x02 \x01(\x0b\x32\x0c.ModelConfig\x12\"\n\x0ctrain_config\x18\x03 \x01(\x0b\x32\x0c.TrainConfigb\x06proto3')
,
dependencies=[nvidia__tao__deploy_dot_cv_dot_classification__tf1_dot_proto_dot_model__config__pb2.DESCRIPTOR,nvidia__tao__deploy_dot_cv_dot_classification__tf1_dot_proto_dot_eval__config__pb2.DESCRIPTOR,nvidia__tao__deploy_dot_cv_dot_classification__tf1_dot_proto_dot_train__config__pb2.DESCRIPTOR,])
# Descriptor for Experiment: three message-typed fields resolved below against
# the imported EvalConfig / ModelConfig / TrainConfig descriptors.
_EXPERIMENT = _descriptor.Descriptor(
name='Experiment',
full_name='Experiment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='eval_config', full_name='Experiment.eval_config', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='model_config', full_name='Experiment.model_config', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='train_config', full_name='Experiment.train_config', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=263,
serialized_end=381,
)
# Link the message-typed fields to their descriptors from the imported modules.
_EXPERIMENT.fields_by_name['eval_config'].message_type = nvidia__tao__deploy_dot_cv_dot_classification__tf1_dot_proto_dot_eval__config__pb2._EVALCONFIG
_EXPERIMENT.fields_by_name['model_config'].message_type = nvidia__tao__deploy_dot_cv_dot_classification__tf1_dot_proto_dot_model__config__pb2._MODELCONFIG
_EXPERIMENT.fields_by_name['train_config'].message_type = nvidia__tao__deploy_dot_cv_dot_classification__tf1_dot_proto_dot_train__config__pb2._TRAINCONFIG
# Register the descriptor and synthesize the Experiment message class.
DESCRIPTOR.message_types_by_name['Experiment'] = _EXPERIMENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Experiment = _reflection.GeneratedProtocolMessageType('Experiment', (_message.Message,), dict(
DESCRIPTOR = _EXPERIMENT,
__module__ = 'nvidia_tao_deploy.cv.classification_tf1.proto.experiment_pb2'
# @@protoc_insertion_point(class_scope:Experiment)
))
_sym_db.RegisterMessage(Experiment)
# @@protoc_insertion_point(module_scope)
| tao_deploy-main | nvidia_tao_deploy/cv/classification_tf1/proto/experiment_pb2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy Config Base Utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from google.protobuf.text_format import Merge as merge_text_proto
from nvidia_tao_deploy.cv.classification_tf1.proto.experiment_pb2 import Experiment
def load_proto(config):
    """Load an Experiment proto from a text-format spec file.

    Args:
        config (str): Path to the experiment spec file (protobuf text format).

    Returns:
        Experiment: Experiment proto populated from the spec file.

    Raises:
        IOError: If the spec file does not exist at the given path.
    """
    proto = Experiment()

    def _load_from_file(filename, pb2):
        """Merge the text-format protobuf in ``filename`` into message ``pb2``."""
        if not os.path.exists(filename):
            # Bug fix: the original f-string had no placeholder, so the error
            # always printed a literal instead of the offending path.
            raise IOError(f"Specfile not found at: {filename}")
        with open(filename, "r", encoding="utf-8") as f:
            merge_text_proto(f.read(), pb2)

    _load_from_file(config, proto)
    return proto
| tao_deploy-main | nvidia_tao_deploy/cv/classification_tf1/proto/utils.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/classification_tf1/proto/eval_config.proto
# NOTE(review): protoc-generated descriptor module for EvalConfig.
# Regenerate from the .proto instead of editing this file by hand.
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# File-level descriptor embedding the serialized eval_config.proto definition.
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_deploy/cv/classification_tf1/proto/eval_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n?nvidia_tao_deploy/cv/classification_tf1/proto/eval_config.proto\"\x8d\x01\n\nEvalConfig\x12\r\n\x05top_k\x18\x01 \x01(\r\x12\x19\n\x11\x65val_dataset_path\x18\x02 \x01(\t\x12\x12\n\nmodel_path\x18\x03 \x01(\t\x12\x12\n\nbatch_size\x18\x04 \x01(\r\x12\x11\n\tn_workers\x18\x05 \x01(\r\x12\x1a\n\x12\x65nable_center_crop\x18\x06 \x01(\x08\x62\x06proto3')
)
# Descriptor for EvalConfig: top_k, batch_size, n_workers (uint32),
# eval_dataset_path, model_path (string), enable_center_crop (bool).
_EVALCONFIG = _descriptor.Descriptor(
name='EvalConfig',
full_name='EvalConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='top_k', full_name='EvalConfig.top_k', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eval_dataset_path', full_name='EvalConfig.eval_dataset_path', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='model_path', full_name='EvalConfig.model_path', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batch_size', full_name='EvalConfig.batch_size', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='n_workers', full_name='EvalConfig.n_workers', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_center_crop', full_name='EvalConfig.enable_center_crop', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=68,
serialized_end=209,
)
# Register the descriptor and synthesize the EvalConfig message class.
DESCRIPTOR.message_types_by_name['EvalConfig'] = _EVALCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
EvalConfig = _reflection.GeneratedProtocolMessageType('EvalConfig', (_message.Message,), dict(
DESCRIPTOR = _EVALCONFIG,
__module__ = 'nvidia_tao_deploy.cv.classification_tf1.proto.eval_config_pb2'
# @@protoc_insertion_point(class_scope:EvalConfig)
))
_sym_db.RegisterMessage(EvalConfig)
# @@protoc_insertion_point(module_scope)
| tao_deploy-main | nvidia_tao_deploy/cv/classification_tf1/proto/eval_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/classification_tf1/proto/model_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_deploy/cv/classification_tf1/proto/model_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n@nvidia_tao_deploy/cv/classification_tf1/proto/model_config.proto\"4\n\x0f\x42\x61tchNormConfig\x12\x10\n\x08momentum\x18\x01 \x01(\x02\x12\x0f\n\x07\x65psilon\x18\x02 \x01(\x02\"\xa8\x01\n\nActivation\x12\x17\n\x0f\x61\x63tivation_type\x18\x01 \x01(\t\x12\x44\n\x15\x61\x63tivation_parameters\x18\x02 \x03(\x0b\x32%.Activation.ActivationParametersEntry\x1a;\n\x19\x41\x63tivationParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x02:\x02\x38\x01\"\x8c\x03\n\x0bModelConfig\x12\x0c\n\x04\x61rch\x18\x01 \x01(\t\x12\x18\n\x10input_image_size\x18\x02 \x01(\t\x12\x39\n\x1bresize_interpolation_method\x18\x0c \x01(\x0e\x32\x14.InterpolationMethod\x12\x10\n\x08n_layers\x18\x03 \x01(\r\x12\x13\n\x0bretain_head\x18\x04 \x01(\x08\x12\x16\n\x0euse_batch_norm\x18\x05 \x01(\x08\x12\x10\n\x08use_bias\x18\x06 \x01(\x08\x12\x13\n\x0buse_pooling\x18\x07 \x01(\x08\x12\x17\n\x0f\x61ll_projections\x18\x08 \x01(\x08\x12\x11\n\tfreeze_bn\x18\t \x01(\x08\x12\x15\n\rfreeze_blocks\x18\n \x03(\r\x12\x0f\n\x07\x64ropout\x18\x0b \x01(\x02\x12+\n\x11\x62\x61tch_norm_config\x18\r \x01(\x0b\x32\x10.BatchNormConfig\x12\x1f\n\nactivation\x18\x0e \x01(\x0b\x32\x0b.Activation\x12\x12\n\nbyom_model\x18\x0f \x01(\t*0\n\x13InterpolationMethod\x12\x0c\n\x08\x42ILINEAR\x10\x00\x12\x0b\n\x07\x42ICUBIC\x10\x01\x62\x06proto3')
)
_INTERPOLATIONMETHOD = _descriptor.EnumDescriptor(
name='InterpolationMethod',
full_name='InterpolationMethod',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='BILINEAR', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BICUBIC', index=1, number=1,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=692,
serialized_end=740,
)
_sym_db.RegisterEnumDescriptor(_INTERPOLATIONMETHOD)
InterpolationMethod = enum_type_wrapper.EnumTypeWrapper(_INTERPOLATIONMETHOD)
BILINEAR = 0
BICUBIC = 1
_BATCHNORMCONFIG = _descriptor.Descriptor(
name='BatchNormConfig',
full_name='BatchNormConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='momentum', full_name='BatchNormConfig.momentum', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='epsilon', full_name='BatchNormConfig.epsilon', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=68,
serialized_end=120,
)
_ACTIVATION_ACTIVATIONPARAMETERSENTRY = _descriptor.Descriptor(
name='ActivationParametersEntry',
full_name='Activation.ActivationParametersEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='Activation.ActivationParametersEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='Activation.ActivationParametersEntry.value', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=232,
serialized_end=291,
)
_ACTIVATION = _descriptor.Descriptor(
name='Activation',
full_name='Activation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='activation_type', full_name='Activation.activation_type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='activation_parameters', full_name='Activation.activation_parameters', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_ACTIVATION_ACTIVATIONPARAMETERSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=123,
serialized_end=291,
)
_MODELCONFIG = _descriptor.Descriptor(
name='ModelConfig',
full_name='ModelConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='arch', full_name='ModelConfig.arch', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='input_image_size', full_name='ModelConfig.input_image_size', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resize_interpolation_method', full_name='ModelConfig.resize_interpolation_method', index=2,
number=12, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='n_layers', full_name='ModelConfig.n_layers', index=3,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='retain_head', full_name='ModelConfig.retain_head', index=4,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='use_batch_norm', full_name='ModelConfig.use_batch_norm', index=5,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='use_bias', full_name='ModelConfig.use_bias', index=6,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='use_pooling', full_name='ModelConfig.use_pooling', index=7,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='all_projections', full_name='ModelConfig.all_projections', index=8,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freeze_bn', full_name='ModelConfig.freeze_bn', index=9,
number=9, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freeze_blocks', full_name='ModelConfig.freeze_blocks', index=10,
number=10, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dropout', full_name='ModelConfig.dropout', index=11,
number=11, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batch_norm_config', full_name='ModelConfig.batch_norm_config', index=12,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='activation', full_name='ModelConfig.activation', index=13,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='byom_model', full_name='ModelConfig.byom_model', index=14,
number=15, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=294,
serialized_end=690,
)
# --- Descriptor wiring (generated by protoc; edit the .proto source, not this file) ---
# Link the map-entry type and message/enum-typed fields to their descriptors.
_ACTIVATION_ACTIVATIONPARAMETERSENTRY.containing_type = _ACTIVATION
_ACTIVATION.fields_by_name['activation_parameters'].message_type = _ACTIVATION_ACTIVATIONPARAMETERSENTRY
_MODELCONFIG.fields_by_name['resize_interpolation_method'].enum_type = _INTERPOLATIONMETHOD
_MODELCONFIG.fields_by_name['batch_norm_config'].message_type = _BATCHNORMCONFIG
_MODELCONFIG.fields_by_name['activation'].message_type = _ACTIVATION
# Register the top-level messages and enums on the file descriptor.
DESCRIPTOR.message_types_by_name['BatchNormConfig'] = _BATCHNORMCONFIG
DESCRIPTOR.message_types_by_name['Activation'] = _ACTIVATION
DESCRIPTOR.message_types_by_name['ModelConfig'] = _MODELCONFIG
DESCRIPTOR.enum_types_by_name['InterpolationMethod'] = _INTERPOLATIONMETHOD
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Build the concrete Python message classes from the descriptors above.
BatchNormConfig = _reflection.GeneratedProtocolMessageType('BatchNormConfig', (_message.Message,), dict(
    DESCRIPTOR = _BATCHNORMCONFIG,
    __module__ = 'nvidia_tao_deploy.cv.classification_tf1.proto.model_config_pb2'
    # @@protoc_insertion_point(class_scope:BatchNormConfig)
    ))
_sym_db.RegisterMessage(BatchNormConfig)

Activation = _reflection.GeneratedProtocolMessageType('Activation', (_message.Message,), dict(
    # Map fields (activation_parameters) generate a nested <Field>Entry message type.
    ActivationParametersEntry = _reflection.GeneratedProtocolMessageType('ActivationParametersEntry', (_message.Message,), dict(
        DESCRIPTOR = _ACTIVATION_ACTIVATIONPARAMETERSENTRY,
        __module__ = 'nvidia_tao_deploy.cv.classification_tf1.proto.model_config_pb2'
        # @@protoc_insertion_point(class_scope:Activation.ActivationParametersEntry)
        ))
    ,
    DESCRIPTOR = _ACTIVATION,
    __module__ = 'nvidia_tao_deploy.cv.classification_tf1.proto.model_config_pb2'
    # @@protoc_insertion_point(class_scope:Activation)
    ))
_sym_db.RegisterMessage(Activation)
_sym_db.RegisterMessage(Activation.ActivationParametersEntry)

ModelConfig = _reflection.GeneratedProtocolMessageType('ModelConfig', (_message.Message,), dict(
    DESCRIPTOR = _MODELCONFIG,
    __module__ = 'nvidia_tao_deploy.cv.classification_tf1.proto.model_config_pb2'
    # @@protoc_insertion_point(class_scope:ModelConfig)
    ))
_sym_db.RegisterMessage(ModelConfig)

# The map-entry's options (map_entry=True) come from the serialized pb;
# clearing _options here is standard protoc output.
_ACTIVATION_ACTIVATIONPARAMETERSENTRY._options = None
# @@protoc_insertion_point(module_scope)
| tao_deploy-main | nvidia_tao_deploy/cv/classification_tf1/proto/model_config_pb2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classification convert etlt/onnx model to TRT engine."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import tempfile
from nvidia_tao_deploy.cv.classification_tf1.proto.utils import load_proto
from nvidia_tao_deploy.cv.classification_tf1.engine_builder import ClassificationEngineBuilder
from nvidia_tao_deploy.utils.decoding import decode_model
from nvidia_tao_deploy.cv.common.decorators import monitor_status
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
DEFAULT_MAX_BATCH_SIZE = 1
DEFAULT_MIN_BATCH_SIZE = 1
DEFAULT_OPT_BATCH_SIZE = 1
@monitor_status(name='classification_tf1', mode='gen_trt_engine')
def main(args):
    """Convert a classification .etlt/.onnx model into a TensorRT engine.

    Args:
        args (argparse.Namespace): parsed command line arguments
            (see ``build_command_line_parser``).
    """
    # Decode/decrypt the model; returns a temporary model file path plus the
    # detected serialization format ("onnx" or "uff").
    tmp_onnx_file, file_format = decode_model(args.model_path, args.key)

    # Load from proto-based spec file.
    es = load_proto(args.experiment_spec)

    image_mean = es.train_config.image_mean
    if image_mean:
        assert all(c in image_mean for c in ['r', 'g', 'b']), (
            "'r', 'g', 'b' should all be present in image_mean "
            "for images with 3 channels."
        )
        # Channels are fed to the network in BGR order.
        image_mean = [image_mean['b'], image_mean['g'], image_mean['r']]
    else:
        # Default ImageNet (caffe-mode) BGR mean values.
        image_mean = [103.939, 116.779, 123.68]

    # An engine is only built when an output path was requested or when INT8
    # mode is selected (INT8 calibration requires building an engine even if
    # the caller did not ask for one to be saved).
    if args.engine_file is not None or args.data_type == 'int8':
        if args.engine_file is None:
            # No output path: build into a throw-away temporary file, used
            # only to drive calibration.
            engine_handle, temp_engine_path = tempfile.mkstemp()
            os.close(engine_handle)
            output_engine_path = temp_engine_path
        else:
            output_engine_path = args.engine_file

        builder = ClassificationEngineBuilder(verbose=args.verbose,
                                              image_mean=image_mean,
                                              workspace=args.max_workspace_size,
                                              min_batch_size=args.min_batch_size,
                                              opt_batch_size=args.opt_batch_size,
                                              max_batch_size=args.max_batch_size,
                                              strict_type_constraints=args.strict_type_constraints)
        builder.create_network(tmp_onnx_file, file_format)
        builder.create_engine(
            output_engine_path,
            args.data_type,
            calib_data_file=args.cal_data_file,
            calib_input=args.cal_image_dir,
            calib_cache=args.cal_cache_file,
            calib_num_images=args.batch_size * args.batches,
            calib_batch_size=args.batch_size)

    # Fix: use the module-level logger instead of the root logger so the
    # configured "[TAO Toolkit]" format and logger name are applied.
    logger.info("Export finished successfully.")
def build_command_line_parser(parser=None):
    """Build the command line parser using argparse.

    Args:
        parser (subparser): Provided from the wrapper script to build a chained
            parser mechanism.

    Returns:
        parser
    """
    if parser is None:
        parser = argparse.ArgumentParser(
            prog='gen_trt_engine',
            description='Generate TRT engine of classification model.')

    # Declarative table of (flags, options); each entry is registered on the
    # parser below, identically to an explicit add_argument call.
    argument_table = [
        (('-m', '--model_path'),
         dict(type=str, required=True,
              help='Path to a classification .etlt or .onnx model file.')),
        (('-k', '--key'),
         dict(type=str, required=False,
              help='Key to save or load a .etlt model.')),
        (('-e', '--experiment_spec'),
         dict(type=str, required=True,
              help='Path to the experiment spec file.')),
        (('--data_type',),
         dict(type=str, default="fp32",
              help="Data type for the TensorRT export.",
              choices=["fp32", "fp16", "int8"])),
        (('--cal_image_dir',),
         dict(default="", type=str,
              help="Directory of images to run int8 calibration.")),
        (('--cal_data_file',),
         dict(default=None, type=str,
              help="Tensorfile to run calibration for int8 optimization.")),
        (('--cal_cache_file',),
         dict(default=None, type=str,
              help='Calibration cache file to write to.')),
        (('--engine_file',),
         dict(type=str, default=None,
              help="Path to the exported TRT engine.")),
        (('--max_batch_size',),
         dict(type=int, default=DEFAULT_MAX_BATCH_SIZE,
              help="Max batch size for TensorRT engine builder.")),
        (('--min_batch_size',),
         dict(type=int, default=DEFAULT_MIN_BATCH_SIZE,
              help="Min batch size for TensorRT engine builder.")),
        (('--opt_batch_size',),
         dict(type=int, default=DEFAULT_OPT_BATCH_SIZE,
              help="Opt batch size for TensorRT engine builder.")),
        (('--batch_size',),
         dict(type=int, default=1,
              help="Number of images per batch.")),
        (('--batches',),
         dict(type=int, default=10,
              help="Number of batches to calibrate over.")),
        (('--max_workspace_size',),
         dict(type=int, default=2,
              help="Max memory workspace size to allow in Gb for TensorRT engine builder (default: 2).")),
        (('-s', '--strict_type_constraints'),
         dict(action="store_true", default=False,
              help="A Boolean flag indicating whether to apply the \
                TensorRT strict type constraints when building the TensorRT engine.")),
        (('-v', '--verbose'),
         dict(action="store_true", default=False,
              help="Verbosity of the logger.")),
        (('-r', '--results_dir'),
         dict(type=str, required=True, default=None,
              help='Output directory where the log is saved.')),
    ]
    for flags, options in argument_table:
        parser.add_argument(*flags, **options)
    return parser
def parse_command_line_arguments(args=None):
    """Simple function to parse command line arguments.

    Args:
        args (list): CLI argument strings; ``argparse`` falls back to
            ``sys.argv[1:]`` when None.

    Returns:
        argparse.Namespace: the parsed arguments.
    """
    # Fix: the argument list used to be passed as the ``parser`` parameter of
    # build_command_line_parser, which expects an argparse parser (or None),
    # not a list of strings.
    parser = build_command_line_parser()
    return parser.parse_args(args)


if __name__ == '__main__':
    args = parse_command_line_arguments()
    main(args)
| tao_deploy-main | nvidia_tao_deploy/cv/classification_tf1/scripts/gen_trt_engine.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy Classification TF1 scripts module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_deploy-main | nvidia_tao_deploy/cv/classification_tf1/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Standalone TensorRT inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import pandas as pd
import json
import numpy as np
from tqdm.auto import tqdm
from nvidia_tao_deploy.cv.classification_tf1.inferencer import ClassificationInferencer
from nvidia_tao_deploy.cv.classification_tf1.dataloader import ClassificationLoader
from nvidia_tao_deploy.cv.classification_tf1.proto.utils import load_proto
from nvidia_tao_deploy.cv.common.decorators import monitor_status
logging.getLogger('PIL').setLevel(logging.WARNING)
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
@monitor_status(name='classification_tf1', mode='inference')
def main(args):
    """Run TensorRT inference with a classification engine and write a CSV of predictions.

    Args:
        args (argparse.Namespace): parsed command line arguments
            (see ``build_command_line_parser``).
    """
    # Load from proto-based spec file.
    es = load_proto(args.experiment_spec)
    interpolation = es.model_config.resize_interpolation_method if es.model_config.resize_interpolation_method else 0
    interpolation_map = {
        0: "bilinear",
        1: "bicubic"
    }
    interpolation_method = interpolation_map[interpolation]
    mode = es.train_config.preprocess_mode if es.train_config.preprocess_mode else "caffe"
    crop = "center" if es.eval_config.enable_center_crop else None

    image_mean = es.train_config.image_mean
    if image_mean:
        assert all(c in image_mean for c in ['r', 'g', 'b']), (
            "'r', 'g', 'b' should all be present in image_mean "
            "for images with 3 channels."
        )
        # Channels are fed to the network in BGR order.
        image_mean = [image_mean['b'], image_mean['g'], image_mean['r']]
    else:
        # Default ImageNet (caffe-mode) BGR mean values.
        image_mean = [103.939, 116.779, 123.68]

    data_format = "channels_first"  # TF1 is always channels first
    batch_size = es.eval_config.batch_size if args.batch_size is None else args.batch_size
    image_dirs = args.image_dir

    if args.classmap:
        # If a classmap is provided, we explicitly set the mapping from the json file.
        if not os.path.exists(args.classmap):
            raise FileNotFoundError(f"{args.classmap} does not exist!")
        with open(args.classmap, "r", encoding="utf-8") as f:
            mapping_dict = json.load(f)
    else:
        # If not, the order of the classes are alphanumeric as defined by Keras
        # Ref: https://github.com/keras-team/keras/blob/07e13740fd181fc3ddec7d9a594d8a08666645f6/keras/preprocessing/image.py#L507
        mapping_dict = {}
        for idx, subdir in enumerate(sorted(os.listdir(image_dirs))):
            if os.path.isdir(os.path.join(image_dirs, subdir)):
                mapping_dict[subdir] = idx

    # Perf fix: invert the mapping once (index -> label) instead of doing a
    # linear search through the dict values for every single prediction.
    index_to_label = {v: k for k, v in mapping_dict.items()}

    trt_infer = ClassificationInferencer(args.model_path, data_format=data_format, batch_size=batch_size)

    if trt_infer.etlt_type == "uff" and batch_size != 1:
        # Typo fix in the warning message: "dynmaic" -> "dynamic".
        logger.warning("The etlt file was in deprecated UFF format which does not support dynamic batch size. "
                       "Overriding the batch size to 1")
        batch_size = 1

    dl = ClassificationLoader(
        trt_infer._input_shape,
        [image_dirs],
        mapping_dict,
        is_inference=True,
        data_format=data_format,
        interpolation_method=interpolation_method,
        mode=mode,
        crop=crop,
        batch_size=batch_size,
        image_mean=image_mean,
        dtype=trt_infer.inputs[0].host.dtype)

    if args.results_dir is None:
        results_dir = os.path.dirname(args.model_path)
    else:
        results_dir = args.results_dir
    os.makedirs(results_dir, exist_ok=True)
    result_csv_path = os.path.join(results_dir, 'result.csv')
    with open(result_csv_path, 'w', encoding="utf-8") as csv_f:
        for i, (imgs, _) in tqdm(enumerate(dl), total=len(dl), desc="Producing predictions"):
            # NOTE(review): assumes the loader yields full batches (image_paths
            # indexed by a full-sized arange) -- confirm the loader pads the
            # final partial batch.
            image_paths = dl.image_paths[np.arange(batch_size) + batch_size * i]
            y_pred = trt_infer.infer(imgs)

            # Class output from softmax layer.
            class_indices = np.argmax(y_pred, axis=1)
            conf = np.max(y_pred, axis=1)

            # Map label index to label name via the precomputed inverse map.
            class_labels = [index_to_label[c] for c in class_indices]

            # Write predictions to file.
            df = pd.DataFrame(zip(image_paths, class_labels, conf))
            df.to_csv(csv_f, header=False, index=False)

    # Fix: use the module-level logger instead of the root logger.
    logger.info("Finished inference.")
def build_command_line_parser(parser=None):
    """Build the command line parser using argparse.

    Args:
        parser (subparser): Provided from the wrapper script to build a chained
            parser mechanism.

    Returns:
        parser
    """
    if parser is None:
        parser = argparse.ArgumentParser(prog='infer', description='Inference with a Classification TRT model.')

    # Declarative table of (flags, options) registered on the parser below;
    # behavior is identical to explicit add_argument calls.
    argument_table = [
        (('-i', '--image_dir'),
         dict(type=str, required=True, default=None,
              help='Input directory of images')),
        (('-m', '--model_path'),
         dict(type=str, required=True,
              help='Path to the Classification TensorRT engine.')),
        (('-e', '--experiment_spec'),
         dict(type=str, required=True,
              help='Path to the experiment spec file.')),
        (('-b', '--batch_size'),
         dict(type=int, required=False, default=None,
              help='Batch size.')),
        (('-c', '--classmap'),
         dict(type=str, required=False, default=None,
              help='File with class mapping.')),
        (('-r', '--results_dir'),
         dict(type=str, required=True, default=None,
              help='Output directory where the log is saved.')),
    ]
    for flags, options in argument_table:
        parser.add_argument(*flags, **options)
    return parser
def parse_command_line_arguments(args=None):
    """Simple function to parse command line arguments.

    Args:
        args (list): CLI argument strings; ``argparse`` falls back to
            ``sys.argv[1:]`` when None.

    Returns:
        argparse.Namespace: the parsed arguments.
    """
    # Fix: the argument list used to be passed as the ``parser`` parameter of
    # build_command_line_parser, which expects an argparse parser (or None),
    # not a list of strings.
    parser = build_command_line_parser()
    return parser.parse_args(args)


if __name__ == '__main__':
    args = parse_command_line_arguments()
    main(args)
| tao_deploy-main | nvidia_tao_deploy/cv/classification_tf1/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Standalone TensorRT evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import json
import numpy as np
from tqdm.auto import tqdm
from sklearn.metrics import classification_report, confusion_matrix, top_k_accuracy_score
from nvidia_tao_deploy.cv.classification_tf1.inferencer import ClassificationInferencer
from nvidia_tao_deploy.cv.classification_tf1.dataloader import ClassificationLoader
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.classification_tf1.proto.utils import load_proto
logging.getLogger('PIL').setLevel(logging.WARNING)
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
@monitor_status(name='classification_tf1', mode='evaluation')
def main(args):
    """Evaluate a classification TensorRT engine on a labeled image directory.

    Computes top-k accuracy, prints a confusion matrix and classification
    report, and writes the accuracy to ``results.json``.

    Args:
        args (argparse.Namespace): parsed command line arguments
            (see ``build_command_line_parser``).
    """
    # Load from proto-based spec file.
    es = load_proto(args.experiment_spec)
    interpolation = es.model_config.resize_interpolation_method if es.model_config.resize_interpolation_method else 0
    interpolation_map = {
        0: "bilinear",
        1: "bicubic"
    }
    interpolation_method = interpolation_map[interpolation]
    mode = es.train_config.preprocess_mode if es.train_config.preprocess_mode else "caffe"
    crop = "center" if es.eval_config.enable_center_crop else None

    image_mean = es.train_config.image_mean
    if image_mean:
        assert all(c in image_mean for c in ['r', 'g', 'b']), (
            "'r', 'g', 'b' should all be present in image_mean "
            "for images with 3 channels."
        )
        # Channels are fed to the network in BGR order.
        image_mean = [image_mean['b'], image_mean['g'], image_mean['r']]
    else:
        # Default ImageNet (caffe-mode) BGR mean values.
        image_mean = [103.939, 116.779, 123.68]

    top_k = es.eval_config.top_k if es.eval_config.top_k else 5
    data_format = "channels_first"  # TF1 is always channels first
    batch_size = es.eval_config.batch_size if args.batch_size is None else args.batch_size

    # Override eval_dataset_path from spec file if image directory is provided.
    image_dirs = args.image_dir if args.image_dir else es.eval_config.eval_dataset_path

    if args.classmap:
        # If a classmap is provided, we explicitly set the mapping from the json file.
        if not os.path.exists(args.classmap):
            raise FileNotFoundError(f"{args.classmap} does not exist!")
        with open(args.classmap, "r", encoding="utf-8") as f:
            mapping_dict = json.load(f)
    else:
        # If not, the order of the classes are alphanumeric as defined by Keras
        # Ref: https://github.com/keras-team/keras/blob/07e13740fd181fc3ddec7d9a594d8a08666645f6/keras/preprocessing/image.py#L507
        mapping_dict = {}
        for idx, subdir in enumerate(sorted(os.listdir(image_dirs))):
            if os.path.isdir(os.path.join(image_dirs, subdir)):
                mapping_dict[subdir] = idx

    # Class names ordered by label index. (Fix: previously computed twice --
    # once here and again just before the classification report.)
    target_names = [c[0] for c in sorted(mapping_dict.items(), key=lambda x: x[1])]

    trt_infer = ClassificationInferencer(args.model_path, data_format=data_format, batch_size=batch_size)

    if trt_infer.etlt_type == "uff" and batch_size != 1:
        # Typo fix in the warning message: "dynmaic" -> "dynamic".
        logger.warning("The etlt file was in deprecated UFF format which does not support dynamic batch size. "
                       "Overriding the batch size to 1")
        batch_size = 1

    dl = ClassificationLoader(
        trt_infer._input_shape,
        [image_dirs],
        mapping_dict,
        data_format=data_format,
        interpolation_method=interpolation_method,
        mode=mode,
        crop=crop,
        batch_size=batch_size,
        image_mean=image_mean,
        dtype=trt_infer.inputs[0].host.dtype)

    gt_labels = []
    pred_labels = []
    for imgs, labels in tqdm(dl, total=len(dl), desc="Producing predictions"):
        gt_labels.extend(labels)
        y_pred = trt_infer.infer(imgs)
        pred_labels.extend(y_pred)

    # Check that the class map matches the engine's output dimensionality.
    output_num_classes = pred_labels[0].shape[0]
    if len(mapping_dict) != output_num_classes:
        raise ValueError(f"Provided class map has {len(mapping_dict)} classes while the engine expects {output_num_classes} classes.")

    gt_labels = np.array(gt_labels)
    pred_labels = np.array(pred_labels)

    # Metric calculation.
    if pred_labels.shape[-1] == 2:
        # If there are only two classes, sklearn perceives the problem as binary
        # classification and requires predictions to be in (num_samples, )
        # rather than (num_samples, num_classes).
        scores = top_k_accuracy_score(gt_labels, pred_labels[:, 1], k=top_k)
    else:
        scores = top_k_accuracy_score(gt_labels, pred_labels, k=top_k)

    # Fix: use the module-level logger instead of the root logger throughout.
    logger.info("Top %s scores: %s", top_k, scores)

    logger.info("Confusion Matrix")
    y_predictions = np.argmax(pred_labels, axis=1)
    print(confusion_matrix(gt_labels, y_predictions))
    logger.info("Classification Report")
    print(classification_report(gt_labels, y_predictions, target_names=target_names))

    # Store evaluation results into JSON.
    eval_results = {"top_k_accuracy": scores}
    if args.results_dir is None:
        results_dir = os.path.dirname(args.model_path)
    else:
        results_dir = args.results_dir
    # Fix: ensure the output directory exists before writing (inference.py
    # calls os.makedirs; evaluate previously assumed it already existed).
    os.makedirs(results_dir, exist_ok=True)
    with open(os.path.join(results_dir, "results.json"), "w", encoding="utf-8") as f:
        json.dump(eval_results, f)
    logger.info("Finished evaluation.")
def build_command_line_parser(parser=None):
    """Build the command line parser using argparse.

    Args:
        parser (subparser): Provided from the wrapper script to build a chained
            parser mechanism.

    Returns:
        parser
    """
    if parser is None:
        parser = argparse.ArgumentParser(prog='eval', description='Evaluate with a Classification TRT model.')
    parser.add_argument(
        '-i',
        '--image_dir',
        type=str,
        required=False,
        default=None,
        help='Input directory of images')
    parser.add_argument(
        '-m',
        '--model_path',
        type=str,
        required=True,
        help='Path to the Classification TensorRT engine.'
    )
    parser.add_argument(
        '-e',
        '--experiment_spec',
        type=str,
        required=True,
        help='Path to the experiment spec file.'
    )
    parser.add_argument(
        '-b',
        '--batch_size',
        type=int,
        required=False,
        # Fix: default was 1, which made main()'s `args.batch_size is None`
        # fallback to the spec file's eval batch size unreachable. None matches
        # the inference script's behavior.
        default=None,
        help='Batch size.')
    parser.add_argument(
        '-c',
        '--classmap',
        type=str,
        required=False,
        default=None,
        help='File with class mapping.'
    )
    parser.add_argument(
        '-r',
        '--results_dir',
        type=str,
        required=True,
        default=None,
        help='Output directory where the log is saved.'
    )
    return parser
def parse_command_line_arguments(args=None):
    """Simple function to parse command line arguments.

    Args:
        args (list): CLI argument strings; ``argparse`` falls back to
            ``sys.argv[1:]`` when None.

    Returns:
        argparse.Namespace: the parsed arguments.
    """
    # Fix: the argument list used to be passed as the ``parser`` parameter of
    # build_command_line_parser, which expects an argparse parser (or None),
    # not a list of strings.
    parser = build_command_line_parser()
    return parser.parse_args(args)


if __name__ == '__main__':
    args = parse_command_line_arguments()
    main(args)
| tao_deploy-main | nvidia_tao_deploy/cv/classification_tf1/scripts/evaluate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy command line wrapper to invoke CLI scripts."""
import sys
from nvidia_tao_deploy.cv.common.entrypoint.entrypoint_proto import launch_job
import nvidia_tao_deploy.cv.classification_tf1.scripts
def main():
    """Function to launch the job."""
    # Dispatch the requested subtask (evaluate / inference / gen_trt_engine)
    # to the matching module under classification_tf1.scripts, forwarding the
    # remaining CLI arguments.
    launch_job(nvidia_tao_deploy.cv.classification_tf1.scripts, "classification_tf1", sys.argv[1:])


if __name__ == "__main__":
    main()
| tao_deploy-main | nvidia_tao_deploy/cv/classification_tf1/entrypoint/classification_tf1.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entrypoint module for classification."""
| tao_deploy-main | nvidia_tao_deploy/cv/classification_tf1/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientDet TensorRT inferencer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pycuda.autoinit # noqa pylint: disable=unused-import
import pycuda.driver as cuda
import tensorrt as trt
from nvidia_tao_deploy.inferencer.trt_inferencer import TRTInferencer
class EfficientDetInferencer(TRTInferencer):
    """Implements inference for the EfficientDet TensorRT engine."""

    def __init__(self, engine_path, max_detections_per_image=100):
        """Init.

        Args:
            engine_path (str): The path to the serialized engine to load from disk.
            max_detections_per_image (int): The maximum number of detections to visualize
        """
        # Load TRT engine
        super().__init__(engine_path)
        self.max_detections_per_image = max_detections_per_image

        # Setup I/O bindings: allocate one device buffer per engine binding,
        # sized from the binding's static shape and dtype.
        self.inputs = []
        self.outputs = []
        self.allocations = []
        for i in range(self.engine.num_bindings):
            is_input = False
            if self.engine.binding_is_input(i):
                is_input = True
            name = self.engine.get_binding_name(i)
            dtype = self.engine.get_binding_dtype(i)
            shape = self.engine.get_binding_shape(i)
            if is_input:
                # The engine has a static batch dimension; the input binding's
                # first axis is the batch size.
                self.batch_size = shape[0]
            # Buffer size in bytes = itemsize * product of all dims.
            size = np.dtype(trt.nptype(dtype)).itemsize
            for s in shape:
                size *= s
            allocation = cuda.mem_alloc(size)
            binding = {
                'index': i,
                'name': name,
                'dtype': np.dtype(trt.nptype(dtype)),
                'shape': list(shape),
                'allocation': allocation,
            }
            self.allocations.append(allocation)
            if self.engine.binding_is_input(i):
                self.inputs.append(binding)
                self._input_shape = shape
            else:
                self.outputs.append(binding)

        # Sanity checks: a usable engine has a positive batch size and at
        # least one input and one output binding.
        assert self.batch_size > 0
        assert len(self.inputs) > 0
        assert len(self.outputs) > 0
        assert len(self.allocations) > 0

    def input_spec(self):
        """Get the specs for the input tensor of the network. Useful to prepare memory allocations.

        Returns:
            the shape of the input tensor.
            (numpy) datatype of the input tensor.
        """
        return self.inputs[0]['shape'], self.inputs[0]['dtype']

    def output_spec(self):
        """Get the specs for the output tensors of the network. Useful to prepare memory allocations.

        Returns:
            specs: A list with two items per element, the shape and (numpy) datatype of each output tensor.
        """
        specs = []
        for o in self.outputs:
            specs.append((o['shape'], o['dtype']))
        return specs

    def infer(self, imgs, scales=None):
        """Execute inference on a batch of images.

        The images should already be batched and preprocessed, as prepared by
        the ImageBatcher class. Memory copying to and from the GPU device will be performed here.

        Args:
            imgs: A numpy array holding the image batch.
            scales: The image resize scales for each image in this batch.
                Default: No scale postprocessing applied.

        Returns:
            detections: A dict of numpy arrays with keys 'num_detections',
                'detection_classes', 'detection_scores', 'detection_boxes'.
        """
        # Prepare host-side output buffers matching each output binding.
        outputs = []
        for shape, dtype in self.output_spec():
            outputs.append(np.zeros(shape, dtype))

        # Process I/O and execute the network.
        cuda.memcpy_htod(self.inputs[0]['allocation'], np.ascontiguousarray(imgs))
        self.context.execute_v2(self.allocations)
        for o in range(len(outputs)):
            cuda.memcpy_dtoh(outputs[o], self.outputs[o]['allocation'])

        # NOTE(review): assumes the engine's NMS outputs are ordered
        # [num_detections, boxes, scores, classes] -- confirm against the
        # engine builder's output layout.
        nums = self.max_detections_per_image
        boxes = outputs[1][:, :nums, :]
        scores = outputs[2][:, :nums]
        classes = outputs[3][:, :nums]

        # Reorganize from y1, x1, y2, x2 to x1, y1, x2, y2
        boxes[:, :, [0, 1]] = boxes[:, :, [1, 0]]
        boxes[:, :, [2, 3]] = boxes[:, :, [3, 2]]
        # convert x2, y2 to w, h
        boxes[:, :, 2] -= boxes[:, :, 0]
        boxes[:, :, 3] -= boxes[:, :, 1]
        # Scale the boxes back to original image coordinates.
        # Fix: previously this ran unconditionally and crashed with a
        # TypeError when scales=None, despite the documented default of
        # "no scale postprocessing applied".
        if scales is not None:
            for i in range(len(boxes)):
                boxes[i] /= scales[i]

        detections = {}
        detections['num_detections'] = np.array([nums] * self.batch_size).astype(np.int32)
        # NOTE(review): the "+1" shift presumably converts 0-based network
        # classes to 1-based (COCO-style) labels -- confirm.
        detections['detection_classes'] = classes + 1
        detections['detection_scores'] = scores
        detections['detection_boxes'] = boxes
        return detections

    def __del__(self):
        """Simple function to destroy tensorrt handlers."""
        if self.context:
            del self.context

        if self.engine:
            del self.engine

        if self.allocations:
            self.allocations.clear()
| tao_deploy-main | nvidia_tao_deploy/cv/efficientdet_tf1/inferencer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientDet TensorRT engine builder."""
import logging
import os
import sys
import onnx
import tensorrt as trt
from nvidia_tao_deploy.engine.builder import EngineBuilder
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
class EfficientDetEngineBuilder(EngineBuilder):
    """Parses an ONNX graph and builds a TensorRT engine from it."""

    def get_input_dims(self, model_path):
        """Log the input names and dimensions of the ONNX model.

        (Fix: the docstring previously said "UFF model", but this method loads
        an ONNX graph. Despite the "get" name it only logs; it returns None.)

        Args:
            model_path (str): path to the ONNX model file.
        """
        onnx_model = onnx.load(model_path)
        onnx_inputs = onnx_model.graph.input
        logger.info('List inputs:')
        for i, inp in enumerate(onnx_inputs):
            logger.info('Input %s -> %s.', i, inp.name)
            # Dims after the batch axis (channel/spatial), then the batch axis.
            logger.info('%s.', [d.dim_value for d in inp.type.tensor_type.shape.dim][1:])
            logger.info('%s.', [d.dim_value for d in inp.type.tensor_type.shape.dim][0])

    def create_network(self, model_path, file_format="onnx"):
        """Parse the ONNX graph and create the corresponding TensorRT network definition.

        Args:
            model_path (str): The path to the ONNX graph to load.
            file_format (str): model serialization format; only "onnx" is supported.

        Raises:
            NotImplementedError: if ``file_format`` is not "onnx" (UFF is unsupported).
        """
        if file_format == "onnx":
            self.get_input_dims(model_path)
            network_flags = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))

            self.network = self.builder.create_network(network_flags)
            self.parser = trt.OnnxParser(self.network, self.trt_logger)

            model_path = os.path.realpath(model_path)
            with open(model_path, "rb") as f:
                if not self.parser.parse(f.read()):
                    logger.error("Failed to load ONNX file: %s", model_path)
                    for error in range(self.parser.num_errors):
                        logger.error(self.parser.get_error(error))
                    sys.exit(1)

            inputs = [self.network.get_input(i) for i in range(self.network.num_inputs)]
            outputs = [self.network.get_output(i) for i in range(self.network.num_outputs)]

            logger.info("Network Description")
            # Fix: loop variable renamed from `input`, which shadowed the builtin.
            for net_input in inputs:
                self.batch_size = net_input.shape[0]
                logger.info("Input '%s' with shape %s and dtype %s", net_input.name, net_input.shape, net_input.dtype)
            for output in outputs:
                logger.info("Output '%s' with shape %s and dtype %s", output.name, output.shape, output.dtype)

            # TF1 EfficientDet only support static batch size
            assert self.batch_size > 0
        else:
            logger.info("Parsing UFF model")
            raise NotImplementedError("UFF for EfficientDet is not supported")
| tao_deploy-main | nvidia_tao_deploy/cv/efficientdet_tf1/engine_builder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy EfficientDet."""
| tao_deploy-main | nvidia_tao_deploy/cv/efficientdet_tf1/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientDet loader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from PIL import Image
from nvidia_tao_deploy.dataloader.coco import COCOLoader
class EfficientDetCOCOLoader(COCOLoader):
    """EfficientDet DataLoader."""

    def preprocess_image(self, image_path):
        """The image preprocessor loads an image from disk and prepares it as needed for batching.

        This includes padding, resizing, normalization, data type casting, and transposing.
        This Image Batcher implements one algorithm for now:
        * EfficientDet: Resizes and pads the image to fit the input size.

        Args:
            image_path(str): The path to the image on disk to load.

        Returns:
            image (np.array): A numpy array holding the image sample, ready to be concatenated
                              into the rest of the batch
            scale (float): the resize scale used.
        """

        def resize_pad(image, pad_color=(0, 0, 0)):
            """Resize and Pad.

            A subroutine to implement padding and resizing. This will resize the image to fit
            fully within the input size, and pads the remaining bottom-right portions with
            the value provided.

            Args:
                image (PIL.Image): The PIL image object
                pad_color (list): The RGB values to use for the padded area. Default: Black/Zeros.

            Returns:
                pad (PIL.Image): The PIL image object already resized and padded,
                scale (float): the resize scale used.
            """
            width, height = image.size
            width_scale = width / self.width
            height_scale = height / self.height
            # Uniform scale chosen so the resized image fits entirely inside the input.
            scale = 1.0 / max(width_scale, height_scale)
            image = image.resize(
                (round(width * scale), round(height * scale)),
                resample=Image.BILINEAR)
            pad = Image.new("RGB", (self.width, self.height))
            pad.paste(pad_color, [0, 0, self.width, self.height])
            pad.paste(image)
            return pad, scale

        image = Image.open(image_path)
        image = image.convert(mode='RGB')
        # Resize & pad with the ImageNet mean pixel values and keep the
        # [0, 255] range (no rescaling here).
        image, scale = resize_pad(image, (124, 116, 104))
        image = np.asarray(image, dtype=self.dtype)
        # [0-1] Normalization, Mean subtraction and Std Dev scaling are
        # part of the EfficientDet graph, so no need to do it during preprocessing here
        if self.data_format == "channels_first":
            image = np.transpose(image, (2, 0, 1))
        return image, scale
| tao_deploy-main | nvidia_tao_deploy/cv/efficientdet_tf1/dataloader.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/efficientdet_tf1/proto/training_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_deploy/cv/efficientdet_tf1/proto/training_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\nAnvidia_tao_deploy/cv/efficientdet_tf1/proto/training_config.proto\"\xf9\x04\n\x0eTrainingConfig\x12\x18\n\x10train_batch_size\x18\x01 \x01(\r\x12\x1b\n\x13iterations_per_loop\x18\x02 \x01(\r\x12\x0f\n\x07use_xla\x18\x03 \x01(\x08\x12\x17\n\x0f\x64isable_logging\x18\x04 \x01(\x08\x12\x12\n\ncheckpoint\x18\x05 \x01(\t\x12\x15\n\rstop_at_epoch\x18\x06 \x01(\r\x12\x0e\n\x06resume\x18\x07 \x01(\x08\x12\x19\n\x11\x63heckpoint_period\x18\x08 \x01(\r\x12\x1b\n\x13keep_checkpoint_max\x18\t \x01(\r\x12\x1e\n\x16num_examples_per_epoch\x18\n \x01(\r\x12\x12\n\nnum_epochs\x18\x0b \x01(\r\x12!\n\x19skip_checkpoint_variables\x18\x0c \x01(\t\x12\x1a\n\x12profile_skip_steps\x18\r \x01(\r\x12\x16\n\x0etf_random_seed\x18\x0e \x01(\r\x12\x1c\n\x14moving_average_decay\x18\x0f \x01(\x02\x12\x17\n\x0flr_warmup_epoch\x18\x10 \x01(\x02\x12\x16\n\x0elr_warmup_init\x18\x11 \x01(\x02\x12\x15\n\rlearning_rate\x18\x12 \x01(\x02\x12\x0b\n\x03\x61mp\x18\x13 \x01(\x08\x12\x17\n\x0fl2_weight_decay\x18\x14 \x01(\x02\x12\x17\n\x0fl1_weight_decay\x18\x15 \x01(\x02\x12\x19\n\x11pruned_model_path\x18\x16 \x01(\t\x12\x1b\n\x13\x63lip_gradients_norm\x18\x17 \x01(\x02\x12\x10\n\x08momentum\x18\x18 \x01(\x02\x12\x19\n\x11logging_frequency\x18\x19 \x01(\rb\x06proto3')
)
_TRAININGCONFIG = _descriptor.Descriptor(
name='TrainingConfig',
full_name='TrainingConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='train_batch_size', full_name='TrainingConfig.train_batch_size', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='iterations_per_loop', full_name='TrainingConfig.iterations_per_loop', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='use_xla', full_name='TrainingConfig.use_xla', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='disable_logging', full_name='TrainingConfig.disable_logging', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='checkpoint', full_name='TrainingConfig.checkpoint', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stop_at_epoch', full_name='TrainingConfig.stop_at_epoch', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resume', full_name='TrainingConfig.resume', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='checkpoint_period', full_name='TrainingConfig.checkpoint_period', index=7,
number=8, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='keep_checkpoint_max', full_name='TrainingConfig.keep_checkpoint_max', index=8,
number=9, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_examples_per_epoch', full_name='TrainingConfig.num_examples_per_epoch', index=9,
number=10, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_epochs', full_name='TrainingConfig.num_epochs', index=10,
number=11, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='skip_checkpoint_variables', full_name='TrainingConfig.skip_checkpoint_variables', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='profile_skip_steps', full_name='TrainingConfig.profile_skip_steps', index=12,
number=13, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tf_random_seed', full_name='TrainingConfig.tf_random_seed', index=13,
number=14, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='moving_average_decay', full_name='TrainingConfig.moving_average_decay', index=14,
number=15, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lr_warmup_epoch', full_name='TrainingConfig.lr_warmup_epoch', index=15,
number=16, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lr_warmup_init', full_name='TrainingConfig.lr_warmup_init', index=16,
number=17, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='learning_rate', full_name='TrainingConfig.learning_rate', index=17,
number=18, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='amp', full_name='TrainingConfig.amp', index=18,
number=19, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='l2_weight_decay', full_name='TrainingConfig.l2_weight_decay', index=19,
number=20, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='l1_weight_decay', full_name='TrainingConfig.l1_weight_decay', index=20,
number=21, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pruned_model_path', full_name='TrainingConfig.pruned_model_path', index=21,
number=22, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='clip_gradients_norm', full_name='TrainingConfig.clip_gradients_norm', index=22,
number=23, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='momentum', full_name='TrainingConfig.momentum', index=23,
number=24, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='logging_frequency', full_name='TrainingConfig.logging_frequency', index=24,
number=25, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=70,
serialized_end=703,
)
DESCRIPTOR.message_types_by_name['TrainingConfig'] = _TRAININGCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TrainingConfig = _reflection.GeneratedProtocolMessageType('TrainingConfig', (_message.Message,), dict(
DESCRIPTOR = _TRAININGCONFIG,
__module__ = 'nvidia_tao_deploy.cv.efficientdet_tf1.proto.training_config_pb2'
# @@protoc_insertion_point(class_scope:TrainingConfig)
))
_sym_db.RegisterMessage(TrainingConfig)
# @@protoc_insertion_point(module_scope)
| tao_deploy-main | nvidia_tao_deploy/cv/efficientdet_tf1/proto/training_config_pb2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy EfficientDet TF1 Proto."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_deploy-main | nvidia_tao_deploy/cv/efficientdet_tf1/proto/__init__.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/efficientdet_tf1/proto/experiment.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_deploy.cv.efficientdet_tf1.proto import aug_config_pb2 as nvidia__tao__deploy_dot_cv_dot_efficientdet__tf1_dot_proto_dot_aug__config__pb2
from nvidia_tao_deploy.cv.efficientdet_tf1.proto import dataset_config_pb2 as nvidia__tao__deploy_dot_cv_dot_efficientdet__tf1_dot_proto_dot_dataset__config__pb2
from nvidia_tao_deploy.cv.efficientdet_tf1.proto import eval_config_pb2 as nvidia__tao__deploy_dot_cv_dot_efficientdet__tf1_dot_proto_dot_eval__config__pb2
from nvidia_tao_deploy.cv.efficientdet_tf1.proto import model_config_pb2 as nvidia__tao__deploy_dot_cv_dot_efficientdet__tf1_dot_proto_dot_model__config__pb2
from nvidia_tao_deploy.cv.efficientdet_tf1.proto import training_config_pb2 as nvidia__tao__deploy_dot_cv_dot_efficientdet__tf1_dot_proto_dot_training__config__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_deploy/cv/efficientdet_tf1/proto/experiment.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n<nvidia_tao_deploy/cv/efficientdet_tf1/proto/experiment.proto\x1a<nvidia_tao_deploy/cv/efficientdet_tf1/proto/aug_config.proto\x1a@nvidia_tao_deploy/cv/efficientdet_tf1/proto/dataset_config.proto\x1a=nvidia_tao_deploy/cv/efficientdet_tf1/proto/eval_config.proto\x1a>nvidia_tao_deploy/cv/efficientdet_tf1/proto/model_config.proto\x1a\x41nvidia_tao_deploy/cv/efficientdet_tf1/proto/training_config.proto\"\xcd\x01\n\nExperiment\x12&\n\x0e\x64\x61taset_config\x18\x01 \x01(\x0b\x32\x0e.DatasetConfig\x12(\n\x0ftraining_config\x18\x02 \x01(\x0b\x32\x0f.TrainingConfig\x12 \n\x0b\x65val_config\x18\x03 \x01(\x0b\x32\x0b.EvalConfig\x12\'\n\x13\x61ugmentation_config\x18\x04 \x01(\x0b\x32\n.AugConfig\x12\"\n\x0cmodel_config\x18\x05 \x01(\x0b\x32\x0c.ModelConfigb\x06proto3')
,
dependencies=[nvidia__tao__deploy_dot_cv_dot_efficientdet__tf1_dot_proto_dot_aug__config__pb2.DESCRIPTOR,nvidia__tao__deploy_dot_cv_dot_efficientdet__tf1_dot_proto_dot_dataset__config__pb2.DESCRIPTOR,nvidia__tao__deploy_dot_cv_dot_efficientdet__tf1_dot_proto_dot_eval__config__pb2.DESCRIPTOR,nvidia__tao__deploy_dot_cv_dot_efficientdet__tf1_dot_proto_dot_model__config__pb2.DESCRIPTOR,nvidia__tao__deploy_dot_cv_dot_efficientdet__tf1_dot_proto_dot_training__config__pb2.DESCRIPTOR,])
_EXPERIMENT = _descriptor.Descriptor(
name='Experiment',
full_name='Experiment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dataset_config', full_name='Experiment.dataset_config', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='training_config', full_name='Experiment.training_config', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eval_config', full_name='Experiment.eval_config', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='augmentation_config', full_name='Experiment.augmentation_config', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='model_config', full_name='Experiment.model_config', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=387,
serialized_end=592,
)
_EXPERIMENT.fields_by_name['dataset_config'].message_type = nvidia__tao__deploy_dot_cv_dot_efficientdet__tf1_dot_proto_dot_dataset__config__pb2._DATASETCONFIG
_EXPERIMENT.fields_by_name['training_config'].message_type = nvidia__tao__deploy_dot_cv_dot_efficientdet__tf1_dot_proto_dot_training__config__pb2._TRAININGCONFIG
_EXPERIMENT.fields_by_name['eval_config'].message_type = nvidia__tao__deploy_dot_cv_dot_efficientdet__tf1_dot_proto_dot_eval__config__pb2._EVALCONFIG
_EXPERIMENT.fields_by_name['augmentation_config'].message_type = nvidia__tao__deploy_dot_cv_dot_efficientdet__tf1_dot_proto_dot_aug__config__pb2._AUGCONFIG
_EXPERIMENT.fields_by_name['model_config'].message_type = nvidia__tao__deploy_dot_cv_dot_efficientdet__tf1_dot_proto_dot_model__config__pb2._MODELCONFIG
DESCRIPTOR.message_types_by_name['Experiment'] = _EXPERIMENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Experiment = _reflection.GeneratedProtocolMessageType('Experiment', (_message.Message,), dict(
DESCRIPTOR = _EXPERIMENT,
__module__ = 'nvidia_tao_deploy.cv.efficientdet_tf1.proto.experiment_pb2'
# @@protoc_insertion_point(class_scope:Experiment)
))
_sym_db.RegisterMessage(Experiment)
# @@protoc_insertion_point(module_scope)
| tao_deploy-main | nvidia_tao_deploy/cv/efficientdet_tf1/proto/experiment_pb2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy Config Base Utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from google.protobuf.text_format import Merge as merge_text_proto
from nvidia_tao_deploy.cv.efficientdet_tf1.proto.experiment_pb2 import Experiment
def load_proto(config):
    """Load the experiment proto.

    Args:
        config (str): Path to a text-format (prototxt) experiment spec file.

    Returns:
        Experiment: The parsed experiment proto.

    Raises:
        IOError: If no spec file exists at ``config``.
    """
    proto = Experiment()

    def _load_from_file(filename, pb2):
        """Read a prototxt file and merge its contents into *pb2*."""
        if not os.path.exists(filename):
            # Include the offending path so the user can see what is missing.
            raise IOError(f"Specfile not found at: {filename}")
        with open(filename, "r", encoding="utf-8") as f:
            merge_text_proto(f.read(), pb2)

    _load_from_file(config, proto)
    return proto
| tao_deploy-main | nvidia_tao_deploy/cv/efficientdet_tf1/proto/utils.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/efficientdet_tf1/proto/eval_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_deploy/cv/efficientdet_tf1/proto/eval_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n=nvidia_tao_deploy/cv/efficientdet_tf1/proto/eval_config.proto\"\xdf\x01\n\nEvalConfig\x12\x19\n\x11min_eval_interval\x18\x01 \x01(\r\x12\x14\n\x0c\x65val_timeout\x18\x02 \x01(\r\x12\x17\n\x0f\x65val_batch_size\x18\x03 \x01(\r\x12\x18\n\x10\x65val_epoch_cycle\x18\x04 \x01(\r\x12\x1b\n\x13\x65val_after_training\x18\x05 \x01(\x08\x12\x14\n\x0c\x65val_samples\x18\x06 \x01(\r\x12\x18\n\x10min_score_thresh\x18\x07 \x01(\x02\x12 \n\x18max_detections_per_image\x18\x08 \x01(\rb\x06proto3')
)
_EVALCONFIG = _descriptor.Descriptor(
name='EvalConfig',
full_name='EvalConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='min_eval_interval', full_name='EvalConfig.min_eval_interval', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eval_timeout', full_name='EvalConfig.eval_timeout', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eval_batch_size', full_name='EvalConfig.eval_batch_size', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eval_epoch_cycle', full_name='EvalConfig.eval_epoch_cycle', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eval_after_training', full_name='EvalConfig.eval_after_training', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eval_samples', full_name='EvalConfig.eval_samples', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_score_thresh', full_name='EvalConfig.min_score_thresh', index=6,
number=7, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_detections_per_image', full_name='EvalConfig.max_detections_per_image', index=7,
number=8, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=66,
serialized_end=289,
)
DESCRIPTOR.message_types_by_name['EvalConfig'] = _EVALCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
EvalConfig = _reflection.GeneratedProtocolMessageType('EvalConfig', (_message.Message,), dict(
DESCRIPTOR = _EVALCONFIG,
__module__ = 'nvidia_tao_deploy.cv.efficientdet_tf1.proto.eval_config_pb2'
# @@protoc_insertion_point(class_scope:EvalConfig)
))
_sym_db.RegisterMessage(EvalConfig)
# @@protoc_insertion_point(module_scope)
| tao_deploy-main | nvidia_tao_deploy/cv/efficientdet_tf1/proto/eval_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/efficientdet_tf1/proto/model_config.proto
# NOTE(review): machine-generated protobuf bindings. To change the schema,
# edit the .proto source above and re-run protoc; do not hand-edit this file.
import sys
# Py2/Py3 shim: the serialized descriptor below is embedded as a str literal,
# so on Python 3 it must be latin1-encoded back into bytes for protobuf.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
# Default symbol database used to register this file's descriptors/messages.
_sym_db = _symbol_database.Default()
# File-level descriptor, rebuilt from the protoc-emitted serialized blob.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='nvidia_tao_deploy/cv/efficientdet_tf1/proto/model_config.proto',
  package='',
  syntax='proto3',
  serialized_options=None,
  serialized_pb=_b('\n>nvidia_tao_deploy/cv/efficientdet_tf1/proto/model_config.proto\"\xb2\x01\n\x0bModelConfig\x12\x12\n\nmodel_name\x18\x01 \x01(\t\x12\x11\n\tfreeze_bn\x18\x02 \x01(\x08\x12\x15\n\rfreeze_blocks\x18\x03 \x01(\t\x12\x15\n\raspect_ratios\x18\x04 \x01(\t\x12\x14\n\x0c\x61nchor_scale\x18\x05 \x01(\x02\x12\x11\n\tmin_level\x18\x06 \x01(\r\x12\x11\n\tmax_level\x18\x07 \x01(\r\x12\x12\n\nnum_scales\x18\x08 \x01(\rb\x06proto3')
)
# Message descriptor for ModelConfig: one FieldDescriptor per .proto field.
# `number` is the wire tag; `type`/`cpp_type` are protobuf type enum codes.
_MODELCONFIG = _descriptor.Descriptor(
  name='ModelConfig',
  full_name='ModelConfig',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='model_name', full_name='ModelConfig.model_name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='freeze_bn', full_name='ModelConfig.freeze_bn', index=1,
      number=2, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='freeze_blocks', full_name='ModelConfig.freeze_blocks', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='aspect_ratios', full_name='ModelConfig.aspect_ratios', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='anchor_scale', full_name='ModelConfig.anchor_scale', index=4,
      number=5, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='min_level', full_name='ModelConfig.min_level', index=5,
      number=6, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='max_level', full_name='ModelConfig.max_level', index=6,
      number=7, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='num_scales', full_name='ModelConfig.num_scales', index=7,
      number=8, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=67,
  serialized_end=245,
)
# Register the descriptor and synthesize the concrete ModelConfig class via
# the protobuf reflection API.
DESCRIPTOR.message_types_by_name['ModelConfig'] = _MODELCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ModelConfig = _reflection.GeneratedProtocolMessageType('ModelConfig', (_message.Message,), dict(
  DESCRIPTOR = _MODELCONFIG,
  __module__ = 'nvidia_tao_deploy.cv.efficientdet_tf1.proto.model_config_pb2'
  # @@protoc_insertion_point(class_scope:ModelConfig)
  ))
_sym_db.RegisterMessage(ModelConfig)
# @@protoc_insertion_point(module_scope)
| tao_deploy-main | nvidia_tao_deploy/cv/efficientdet_tf1/proto/model_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/efficientdet_tf1/proto/aug_config.proto
# NOTE(review): machine-generated protobuf bindings. To change the schema,
# edit the .proto source above and re-run protoc; do not hand-edit this file.
import sys
# Py2/Py3 shim: the serialized descriptor below is embedded as a str literal,
# so on Python 3 it must be latin1-encoded back into bytes for protobuf.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
# Default symbol database used to register this file's descriptors/messages.
_sym_db = _symbol_database.Default()
# File-level descriptor, rebuilt from the protoc-emitted serialized blob.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='nvidia_tao_deploy/cv/efficientdet_tf1/proto/aug_config.proto',
  package='',
  syntax='proto3',
  serialized_options=None,
  serialized_pb=_b('\n<nvidia_tao_deploy/cv/efficientdet_tf1/proto/aug_config.proto\"]\n\tAugConfig\x12\x12\n\nrand_hflip\x18\x01 \x01(\x08\x12\x1d\n\x15random_crop_min_scale\x18\x02 \x01(\x02\x12\x1d\n\x15random_crop_max_scale\x18\x03 \x01(\x02\x62\x06proto3')
)
# Message descriptor for AugConfig: one FieldDescriptor per .proto field.
_AUGCONFIG = _descriptor.Descriptor(
  name='AugConfig',
  full_name='AugConfig',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='rand_hflip', full_name='AugConfig.rand_hflip', index=0,
      number=1, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='random_crop_min_scale', full_name='AugConfig.random_crop_min_scale', index=1,
      number=2, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='random_crop_max_scale', full_name='AugConfig.random_crop_max_scale', index=2,
      number=3, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=64,
  serialized_end=157,
)
# Register the descriptor and synthesize the concrete AugConfig class via
# the protobuf reflection API.
DESCRIPTOR.message_types_by_name['AugConfig'] = _AUGCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AugConfig = _reflection.GeneratedProtocolMessageType('AugConfig', (_message.Message,), dict(
  DESCRIPTOR = _AUGCONFIG,
  __module__ = 'nvidia_tao_deploy.cv.efficientdet_tf1.proto.aug_config_pb2'
  # @@protoc_insertion_point(class_scope:AugConfig)
  ))
_sym_db.RegisterMessage(AugConfig)
# @@protoc_insertion_point(module_scope)
| tao_deploy-main | nvidia_tao_deploy/cv/efficientdet_tf1/proto/aug_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/efficientdet_tf1/proto/dataset_config.proto
# NOTE(review): machine-generated protobuf bindings. To change the schema,
# edit the .proto source above and re-run protoc; do not hand-edit this file.
import sys
# Py2/Py3 shim: the serialized descriptor below is embedded as a str literal,
# so on Python 3 it must be latin1-encoded back into bytes for protobuf.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
# Default symbol database used to register this file's descriptors/messages.
_sym_db = _symbol_database.Default()
# File-level descriptor, rebuilt from the protoc-emitted serialized blob.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='nvidia_tao_deploy/cv/efficientdet_tf1/proto/dataset_config.proto',
  package='',
  syntax='proto3',
  serialized_options=None,
  serialized_pb=_b('\n@nvidia_tao_deploy/cv/efficientdet_tf1/proto/dataset_config.proto\"\x87\x02\n\rDatasetConfig\x12\x1d\n\x15training_file_pattern\x18\x01 \x01(\t\x12\x1f\n\x17validation_file_pattern\x18\x02 \x01(\t\x12\x1c\n\x14validation_json_file\x18\x03 \x01(\t\x12\x13\n\x0btestdev_dir\x18\x04 \x01(\t\x12\x13\n\x0bnum_classes\x18\x05 \x01(\r\x12\x12\n\nimage_size\x18\x06 \x01(\t\x12\x15\n\ruse_fake_data\x18\x07 \x01(\x08\x12\x1f\n\x17max_instances_per_image\x18\x08 \x01(\r\x12\"\n\x1askip_crowd_during_training\x18\t \x01(\x08\x62\x06proto3')
)
# Message descriptor for DatasetConfig: one FieldDescriptor per .proto field.
_DATASETCONFIG = _descriptor.Descriptor(
  name='DatasetConfig',
  full_name='DatasetConfig',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='training_file_pattern', full_name='DatasetConfig.training_file_pattern', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='validation_file_pattern', full_name='DatasetConfig.validation_file_pattern', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='validation_json_file', full_name='DatasetConfig.validation_json_file', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='testdev_dir', full_name='DatasetConfig.testdev_dir', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='num_classes', full_name='DatasetConfig.num_classes', index=4,
      number=5, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='image_size', full_name='DatasetConfig.image_size', index=5,
      number=6, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='use_fake_data', full_name='DatasetConfig.use_fake_data', index=6,
      number=7, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='max_instances_per_image', full_name='DatasetConfig.max_instances_per_image', index=7,
      number=8, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='skip_crowd_during_training', full_name='DatasetConfig.skip_crowd_during_training', index=8,
      number=9, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=69,
  serialized_end=332,
)
# Register the descriptor and synthesize the concrete DatasetConfig class via
# the protobuf reflection API.
DESCRIPTOR.message_types_by_name['DatasetConfig'] = _DATASETCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DatasetConfig = _reflection.GeneratedProtocolMessageType('DatasetConfig', (_message.Message,), dict(
  DESCRIPTOR = _DATASETCONFIG,
  __module__ = 'nvidia_tao_deploy.cv.efficientdet_tf1.proto.dataset_config_pb2'
  # @@protoc_insertion_point(class_scope:DatasetConfig)
  ))
_sym_db.RegisterMessage(DatasetConfig)
# @@protoc_insertion_point(module_scope)
| tao_deploy-main | nvidia_tao_deploy/cv/efficientdet_tf1/proto/dataset_config_pb2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientDet convert etlt/onnx model to TRT engine."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import tempfile
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.efficientdet_tf1.engine_builder import EfficientDetEngineBuilder
from nvidia_tao_deploy.utils.decoding import decode_model
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
DEFAULT_MAX_BATCH_SIZE = 1
DEFAULT_MIN_BATCH_SIZE = 1
DEFAULT_OPT_BATCH_SIZE = 1
@monitor_status(name='efficientdet_tf1', mode='gen_trt_engine')
def main(args):
    """Convert encrypted uff or onnx model to TRT engine.

    Args:
        args (argparse.Namespace): parsed CLI arguments (model_path, key,
            data_type, engine/batch-size settings, results_dir, ...).
    """
    # decrypt etlt
    tmp_onnx_file, file_format = decode_model(args.model_path, args.key)
    # Always resolve an engine output path. Previously output_engine_path was
    # only assigned when --engine_file was given or data_type was int8, so a
    # fp32/fp16 run without --engine_file crashed with NameError below; fall
    # back to a throwaway temp file in that case instead.
    if args.engine_file is None:
        engine_handle, temp_engine_path = tempfile.mkstemp()
        os.close(engine_handle)  # mkstemp opens the fd; only the path is needed
        output_engine_path = temp_engine_path
    else:
        output_engine_path = args.engine_file
    builder = EfficientDetEngineBuilder(verbose=args.verbose,
                                        workspace=args.max_workspace_size,
                                        min_batch_size=args.min_batch_size,
                                        opt_batch_size=args.opt_batch_size,
                                        max_batch_size=args.max_batch_size,
                                        strict_type_constraints=args.strict_type_constraints)
    builder.create_network(tmp_onnx_file, file_format=file_format)
    builder.create_engine(
        output_engine_path,
        args.data_type,
        calib_input=args.cal_image_dir,
        calib_cache=args.cal_cache_file,
        calib_num_images=args.batch_size * args.batches,
        calib_batch_size=args.batch_size)
    # Use the module-level logger for consistency with the rest of the script.
    logger.info("Export finished successfully.")
def build_command_line_parser(parser=None):
    """Construct the argparse parser for TensorRT engine generation.

    Args:
        parser (argparse.ArgumentParser): existing parser supplied by a wrapper
            script when chaining sub-commands; a fresh one is built when None.

    Returns:
        argparse.ArgumentParser: parser with all gen_trt_engine options registered.
    """
    if parser is None:
        parser = argparse.ArgumentParser(prog='gen_trt_engine', description='Generate TRT engine of EfficientDet model.')
    # Model input/output locations.
    parser.add_argument(
        '-m', '--model_path', type=str, required=True,
        help='Path to an EfficientDet .etlt or .onnx model file.')
    parser.add_argument(
        '-k', '--key', type=str, required=False,
        help='Key to save or load a .etlt model.')
    # Precision and calibration options.
    parser.add_argument(
        "--data_type", type=str, default="fp32",
        help="Data type for the TensorRT export.", choices=["fp32", "fp16", "int8"])
    parser.add_argument(
        "--cal_image_dir", default="", type=str,
        help="Directory of images to run int8 calibration.")
    parser.add_argument(
        '--cal_cache_file', default=None, type=str,
        help='Calibration cache file to write to.')
    parser.add_argument(
        "--engine_file", type=str, default=None,
        help="Path to the exported TRT engine.")
    # Batch-size profile for the TensorRT optimization profiles.
    parser.add_argument(
        "--max_batch_size", type=int, default=DEFAULT_MAX_BATCH_SIZE,
        help="Max batch size for TensorRT engine builder.")
    parser.add_argument(
        "--min_batch_size", type=int, default=DEFAULT_MIN_BATCH_SIZE,
        help="Min batch size for TensorRT engine builder.")
    parser.add_argument(
        "--opt_batch_size", type=int, default=DEFAULT_OPT_BATCH_SIZE,
        help="Opt batch size for TensorRT engine builder.")
    parser.add_argument(
        "--batch_size", type=int, default=1,
        help="Number of images per batch.")
    parser.add_argument(
        "--batches", type=int, default=10,
        help="Number of batches to calibrate over.")
    parser.add_argument(
        "--max_workspace_size", type=int, default=2,
        help="Max memory workspace size to allow in Gb for TensorRT engine builder (default: 2).")
    parser.add_argument(
        "-s", "--strict_type_constraints", action="store_true", default=False,
        help="A Boolean flag indicating whether to apply the \
            TensorRT strict type constraints when building the TensorRT engine.")
    parser.add_argument(
        "-v", "--verbose", action="store_true", default=False,
        help="Verbosity of the logger.")
    parser.add_argument(
        '-r', '--results_dir', type=str, required=True, default=None,
        help='Output directory where the log is saved.')
    return parser
def parse_command_line_arguments(args=None):
    """Parse command line arguments for the gen_trt_engine tool.

    Args:
        args (list): CLI argument strings; None falls back to ``sys.argv[1:]``.

    Returns:
        argparse.Namespace: parsed arguments.
    """
    return build_command_line_parser().parse_args(args)
# Script entrypoint: parse CLI arguments and run engine generation.
if __name__ == '__main__':
    args = parse_command_line_arguments()
    main(args)
| tao_deploy-main | nvidia_tao_deploy/cv/efficientdet_tf1/scripts/gen_trt_engine.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy TF1 EfficientDet scripts module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_deploy-main | nvidia_tao_deploy/cv/efficientdet_tf1/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Standalone TensorRT inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
from PIL import Image
import logging
import numpy as np
from tqdm.auto import tqdm
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.efficientdet_tf1.inferencer import EfficientDetInferencer
from nvidia_tao_deploy.cv.efficientdet_tf1.proto.utils import load_proto
from nvidia_tao_deploy.utils.image_batcher import ImageBatcher
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
def get_label_dict(label_txt):
    """Build a class-index-to-name mapping from a newline-separated label file.

    Indices start at 1 (matching detector class ids); index -1 is reserved
    for the implicit "background" class.
    """
    with open(label_txt, 'r', encoding="utf-8") as handle:
        names = handle.readlines()
    mapping = {}
    for class_id, raw_name in enumerate(names, start=1):
        mapping[class_id] = raw_name.strip()
    mapping[-1] = "background"
    return mapping
@monitor_status(name='efficientdet_tf1', mode='inference')
def main(args):
    """EfficientDet TRT inference.

    Runs the TensorRT engine over every image under ``args.image_dir`` and
    writes annotated images plus per-image label files under the results dir.
    """
    # Load from proto-based spec file
    es = load_proto(args.experiment_spec)
    # Fall back to 100 detections per image when the spec leaves it unset (0).
    max_detections_per_image = es.eval_config.max_detections_per_image if es.eval_config.max_detections_per_image else 100
    trt_infer = EfficientDetInferencer(args.model_path, max_detections_per_image=max_detections_per_image)
    # Inference may not have labels. Hence, use image batcher
    batcher = ImageBatcher(args.image_dir,
                           tuple(trt_infer._input_shape),
                           trt_infer.inputs[0]['dtype'],
                           preprocessor="EfficientDet")
    # Create results directories
    if args.results_dir is None:
        results_dir = os.path.dirname(args.model_path)
    else:
        results_dir = args.results_dir
    os.makedirs(results_dir, exist_ok=True)
    output_annotate_root = os.path.join(results_dir, "images_annotated")
    output_label_root = os.path.join(results_dir, "labels")
    os.makedirs(output_annotate_root, exist_ok=True)
    os.makedirs(output_label_root, exist_ok=True)
    if args.class_map and not os.path.exists(args.class_map):
        raise FileNotFoundError(f"Class map at {args.class_map} does not exist.")
    if args.class_map:
        inv_classes = get_label_dict(args.class_map)
    else:
        inv_classes = None
        logger.debug("label_map was not provided. Hence, class predictions will not be displayed on the visualization.")
    for batch, img_paths, scales in tqdm(batcher.get_batch(), total=batcher.num_batches, desc="Producing predictions"):
        detections = trt_infer.infer(batch, scales)
        # Each prediction row is [class_id, score, box(4)] after concatenation.
        y_pred_valid = np.concatenate([detections['detection_classes'][..., None],
                                       detections['detection_scores'][..., None],
                                       detections['detection_boxes']], axis=-1)
        for img_path, pred in zip(img_paths, y_pred_valid):
            # Load Image
            img = Image.open(img_path)
            orig_width, orig_height = img.size
            # Convert xywh to xyxy
            pred[:, 4:] += pred[:, 2:4]
            # Clip boxes to the image bounds. After conversion, columns 2 and 4
            # hold x coordinates and columns 3 and 5 hold y coordinates. The
            # previous strides (2::4 / 3::5) only reached columns 2 and 3, so
            # x2/y2 were never clipped; stride 2 covers both corners.
            pred[..., 2::2] = np.clip(pred[..., 2::2], 0.0, orig_width)
            pred[..., 3::2] = np.clip(pred[..., 3::2], 0.0, orig_height)
            # Scale back the predictions
            # pred[:, 2:6] *= sc
            bbox_img, label_strings = trt_infer.draw_bbox(img, pred, inv_classes, args.threshold)
            img_filename = os.path.basename(img_path)
            bbox_img.save(os.path.join(output_annotate_root, img_filename))
            # Store labels
            filename, _ = os.path.splitext(img_filename)
            label_file_name = os.path.join(output_label_root, filename + ".txt")
            with open(label_file_name, "w", encoding="utf-8") as f:
                for l_s in label_strings:
                    f.write(l_s)
    # Use the module-level logger for consistency with the rest of the script.
    logger.info("Finished inference.")
def build_command_line_parser(parser=None):
    """Construct the argparse parser for EfficientDet TRT inference.

    Args:
        parser (argparse.ArgumentParser): existing parser supplied by a wrapper
            script when chaining sub-commands; a fresh one is built when None.

    Returns:
        argparse.ArgumentParser: parser with all inference options registered.
    """
    if parser is None:
        parser = argparse.ArgumentParser(prog='infer', description='Inference with an EfficientDet TRT model.')
    # (flags, argparse keyword options) pairs, registered in a single pass.
    cli_options = (
        (('-e', '--experiment_spec'),
         {'type': str, 'required': True, 'help': 'Path to the experiment spec file.'}),
        (('-i', '--image_dir'),
         {'type': str, 'required': True, 'default': None, 'help': 'Input directory of images'}),
        (('-m', '--model_path'),
         {'type': str, 'required': True, 'help': 'Path to the EfficientDet TensorRT engine.'}),
        (('-c', '--class_map'),
         {'type': str, 'default': None, 'required': False, 'help': 'The path to the class label file.'}),
        (('-r', '--results_dir'),
         {'type': str, 'required': True, 'default': None, 'help': 'Output directory where the log is saved.'}),
        (('-t', '--threshold'),
         {'type': float, 'default': 0.5, 'help': 'Confidence threshold for inference.'}),
    )
    for flags, options in cli_options:
        parser.add_argument(*flags, **options)
    return parser
def parse_command_line_arguments(args=None):
    """Simple function to parse command line arguments.

    Args:
        args (list): CLI argument strings; None falls back to ``sys.argv[1:]``.

    Returns:
        argparse.Namespace: parsed arguments.
    """
    # Bug fix: the argument list was previously passed as the `parser`
    # parameter of build_command_line_parser, which crashed with an
    # AttributeError whenever an explicit list was supplied.
    parser = build_command_line_parser()
    return parser.parse_args(args)
# Script entrypoint: parse CLI arguments and run inference.
if __name__ == '__main__':
    args = parse_command_line_arguments()
    main(args)
| tao_deploy-main | nvidia_tao_deploy/cv/efficientdet_tf1/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Standalone TensorRT inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import operator
import copy
import logging
import json
import six
import numpy as np
from tqdm.auto import tqdm
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.efficientdet_tf1.dataloader import EfficientDetCOCOLoader
from nvidia_tao_deploy.cv.efficientdet_tf1.inferencer import EfficientDetInferencer
from nvidia_tao_deploy.cv.efficientdet_tf1.proto.utils import load_proto
from nvidia_tao_deploy.metrics.coco_metric import EvaluationMetric
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
@monitor_status(name='efficientdet_tf1', mode='evaluation')
def main(args):
    """EfficientDet TRT evaluation.

    Runs the TensorRT engine over the COCO validation set described in the
    spec file, computes COCO metrics, and writes them to ``results.json``.
    """
    # Load from proto-based spec file
    es = load_proto(args.experiment_spec)
    # 0 means "evaluate on the full validation set" for the COCO loader.
    eval_samples = es.eval_config.eval_samples if es.eval_config.eval_samples else 0
    eval_metric = EvaluationMetric(es.dataset_config.validation_json_file, include_mask=False)
    trt_infer = EfficientDetInferencer(args.model_path)
    dl = EfficientDetCOCOLoader(
        es.dataset_config.validation_json_file,
        shape=trt_infer.inputs[0]['shape'],
        dtype=trt_infer.inputs[0]['dtype'],
        batch_size=1,  # TF1 EfficentDet only supports bs=1
        image_dir=args.image_dir,
        eval_samples=eval_samples)
    # Accumulators for per-batch outputs, concatenated once at the end.
    predictions = {
        'detection_scores': [],
        'detection_boxes': [],
        'detection_classes': [],
        'source_id': [],
        'image_info': [],
        'num_detections': []
    }

    def evaluation_preds(preds):
        """Concatenate accumulated batches and compute COCO metrics."""
        # Essential to avoid modifying the source dict
        _preds = copy.deepcopy(preds)
        for k, _ in six.iteritems(_preds):
            _preds[k] = np.concatenate(_preds[k], axis=0)
        eval_results = eval_metric.predict_metric_fn(_preds)
        return eval_results

    for imgs, scale, source_id, labels in tqdm(dl, total=len(dl), desc="Producing predictions"):
        image = np.array(imgs)
        # image_info rows: height/width metadata plus the preprocessing scale,
        # as expected by the COCO metric implementation.
        image_info = []
        for i, label in enumerate(labels):
            image_info.append([label[-1][0], label[-1][1], scale[i], label[-1][2], label[-1][3]])
        image_info = np.array(image_info)
        detections = trt_infer.infer(image, scale)
        predictions['detection_classes'].append(detections['detection_classes'])
        predictions['detection_scores'].append(detections['detection_scores'])
        predictions['detection_boxes'].append(detections['detection_boxes'])
        predictions['num_detections'].append(detections['num_detections'])
        predictions['image_info'].append(image_info)
        predictions['source_id'].append(source_id)
    eval_results = evaluation_preds(preds=predictions)
    for key, value in sorted(eval_results.items(), key=operator.itemgetter(0)):
        eval_results[key] = float(value)
        # Module-level logger for consistency with the rest of the script.
        logger.info("%s: %.9f", key, value)
    if args.results_dir is None:
        results_dir = os.path.dirname(args.model_path)
    else:
        results_dir = args.results_dir
    # Make sure the output directory exists before writing results.json
    # (inference.py already does this; evaluate previously crashed when the
    # user-supplied results_dir did not exist yet).
    os.makedirs(results_dir, exist_ok=True)
    with open(os.path.join(results_dir, "results.json"), "w", encoding="utf-8") as f:
        json.dump(eval_results, f)
    logger.info("Finished evaluation.")
def build_command_line_parser(parser=None):
    """Construct the argparse parser for EfficientDet TRT evaluation.

    Args:
        parser (argparse.ArgumentParser): existing parser supplied by a wrapper
            script when chaining sub-commands; a fresh one is built when None.

    Returns:
        argparse.ArgumentParser: parser with all evaluation options registered.
    """
    if parser is None:
        parser = argparse.ArgumentParser(prog='eval', description='Evaluate with an EfficientDet TRT model.')
    # (flags, argparse keyword options) pairs, registered in a single pass.
    cli_options = (
        (('-e', '--experiment_spec'),
         {'type': str, 'required': True, 'help': 'Path to the experiment spec file.'}),
        (('-i', '--image_dir'),
         {'type': str, 'required': True, 'default': None, 'help': 'Input directory of images'}),
        (('-m', '--model_path'),
         {'type': str, 'required': True, 'help': 'Path to the EfficientDet TensorRT engine.'}),
        (('-r', '--results_dir'),
         {'type': str, 'required': True, 'default': None, 'help': 'Output directory where the log is saved.'}),
    )
    for flags, options in cli_options:
        parser.add_argument(*flags, **options)
    return parser
def parse_command_line_arguments(args=None):
    """Simple function to parse command line arguments.

    Args:
        args (list): CLI argument strings; None falls back to ``sys.argv[1:]``.

    Returns:
        argparse.Namespace: parsed arguments.
    """
    # Bug fix: the argument list was previously passed as the `parser`
    # parameter of build_command_line_parser, which crashed with an
    # AttributeError whenever an explicit list was supplied.
    parser = build_command_line_parser()
    return parser.parse_args(args)
# Script entrypoint: parse CLI arguments and run evaluation.
if __name__ == '__main__':
    args = parse_command_line_arguments()
    main(args)
| tao_deploy-main | nvidia_tao_deploy/cv/efficientdet_tf1/scripts/evaluate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy command line wrapper to invoke CLI scripts."""
import sys
from nvidia_tao_deploy.cv.common.entrypoint.entrypoint_proto import launch_job
import nvidia_tao_deploy.cv.efficientdet_tf1.scripts
def main():
    """Function to launch the job.

    Delegates to the shared proto-based entrypoint, which dispatches the
    sub-command in ``sys.argv[1:]`` to the matching script in
    ``nvidia_tao_deploy.cv.efficientdet_tf1.scripts``.
    """
    launch_job(nvidia_tao_deploy.cv.efficientdet_tf1.scripts, "efficientdet_tf1", sys.argv[1:])
if __name__ == "__main__":
    main()
| tao_deploy-main | nvidia_tao_deploy/cv/efficientdet_tf1/entrypoint/efficientdet_tf1.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entrypoint module for efficientdet."""
| tao_deploy-main | nvidia_tao_deploy/cv/efficientdet_tf1/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy DINO."""
| tao_deploy-main | nvidia_tao_deploy/cv/dino/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy DINO Hydra."""
| tao_deploy-main | nvidia_tao_deploy/cv/dino/hydra_config/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default config file."""
from typing import Optional, List, Dict
from dataclasses import dataclass, field
from omegaconf import MISSING
@dataclass
class DINODatasetConvertConfig:
    """Dataset Convert config.

    Schema for the ``dataset_convert`` subtask: where the raw data lives and
    how it is sharded/partitioned into the converted output.
    """

    input_source: Optional[str] = None  # File listing the input sequences to convert
    data_root: Optional[str] = None  # Root directory that relative image/label paths resolve against
    results_dir: str = MISSING  # Output directory for converted data (required)
    image_dir_name: Optional[str] = None  # Sub-directory name holding images
    label_dir_name: Optional[str] = None  # Sub-directory name holding labels
    val_split: int = 0  # Portion of the data reserved for validation
    num_shards: int = 20  # Number of output shards per partition
    num_partitions: int = 1  # Number of partitions to split the data into
    partition_mode: Optional[str] = None  # Partitioning strategy; presumably "random"/"sequence" -- TODO confirm
    image_extension: str = ".jpg"  # File extension of the input images
    mapping_path: Optional[str] = None  # Optional class-mapping file path
@dataclass
class DINOAugmentationConfig:
    """Augmentation config.

    Image preprocessing/augmentation parameters shared by training and
    evaluation pipelines (normalization constants, resize/crop ranges).
    """

    # Candidate short-side sizes for multi-scale training resize.
    scales: List[int] = field(default_factory=lambda: [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800],
                              metadata={"description": "Random Scales for Augmentation"})
    # Per-channel normalization mean (ImageNet values, 0-1 range).
    input_mean: List[float] = field(default_factory=lambda: [0.485, 0.456, 0.406],
                                    metadata={"description": "Pixel mean value"})
    # Per-channel normalization std (ImageNet values, 0-1 range).
    input_std: List[float] = field(default_factory=lambda: [0.229, 0.224, 0.225],
                                   metadata={"description": "Pixel Standard deviation value"})
    # Candidate sizes for the random-resize branch during training.
    train_random_resize: List[int] = field(default_factory=lambda: [400, 500, 600],
                                           metadata={"description": "Training Randome Resize"})
    horizontal_flip_prob: float = 0.5  # Probability of horizontal flip during training
    train_random_crop_min: int = 384  # Min side for training random crop
    train_random_crop_max: int = 600  # Max side for training random crop
    random_resize_max_size: int = 1333  # Cap on the long side after resize
    test_random_resize: int = 800  # Short-side size used at evaluation time
    fixed_padding: bool = True  # Pad images to a fixed size instead of batch-dependent padding
@dataclass
class DINODatasetConfig:
    """Dataset config.

    Data sources, dataloader parameters, and the nested augmentation config
    used by the DINO pipelines.
    """

    train_sampler: str = "default_sampler"
    train_data_sources: Optional[List[Dict[str, str]]] = None  # List of {image_dir, json_file} dicts
    val_data_sources: Optional[List[Dict[str, str]]] = None
    test_data_sources: Optional[Dict[str, str]] = None  # Single {image_dir, json_file} source
    infer_data_sources: Optional[Dict[str, str]] = None  # {image_dir, classmap} for inference
    batch_size: int = 4
    workers: int = 8  # Dataloader worker processes
    pin_memory: bool = True
    dataset_type: str = "serialized"
    num_classes: int = 91  # COCO default (90 categories + background)
    eval_class_ids: Optional[List[int]] = None  # Optional subset of class ids to evaluate
    # BUGFIX: the previous class-level `DINOAugmentationConfig()` default was a
    # single instance shared by every DINODatasetConfig, and Python 3.11+
    # dataclasses reject such unhashable defaults with ValueError.
    # `default_factory` builds a fresh instance per config object.
    augmentation: DINOAugmentationConfig = field(default_factory=DINOAugmentationConfig)
@dataclass
class DINOModelConfig:
    """DINO model config.

    Architecture and loss hyper-parameters for the DINO detector.
    """

    pretrained_backbone_path: Optional[str] = None  # Path to pretrained backbone weights
    backbone: str = "resnet_50"  # Backbone architecture name
    num_queries: int = 300  # Number of object queries in the decoder
    num_feature_levels: int = 4  # Number of multi-scale feature levels
    cls_loss_coef: float = 2.0  # Classification loss weight
    bbox_loss_coef: float = 5.0  # L1 box-regression loss weight
    giou_loss_coef: float = 2.0  # Generalized-IoU loss weight
    # DINO training specific
    interm_loss_coef: float = 1.0  # Weight for intermediate (two-stage) losses
    num_select: int = 300  # Top-k predictions kept in post-processing
    no_interm_box_loss: bool = False  # Disable the intermediate box loss
    # DINO model arch specific
    pre_norm: bool = False  # Add layer norm in encoder or not
    two_stage_type: str = 'standard'
    decoder_sa_type: str = 'sa'  # Decoder self-attention type
    embed_init_tgt: bool = True
    fix_refpoints_hw: int = -1  # -1 presumably leaves reference-point H/W learnable -- TODO confirm
    pe_temperatureH: int = 20  # Positional-encoding temperature (height axis)
    pe_temperatureW: int = 20  # Positional-encoding temperature (width axis)
    return_interm_indices: List[int] = field(default_factory=lambda: [1, 2, 3, 4],
                                             metadata={"description": "Indices to return from backbone"})
    # for DN (denoising queries; see dn_* fields below)
    use_dn: bool = True
    dn_number: int = 100  # Number of denoising queries
    dn_box_noise_scale: float = 1.0  # Noise scale applied to boxes for DN
    dn_label_noise_ratio: float = 0.5  # Fraction of labels noised for DN
    focal_alpha: float = 0.25  # Alpha parameter of the focal loss
    clip_max_norm: float = 0.1  # Gradient clipping norm
    dropout_ratio: float = 0.0
    hidden_dim: int = 256  # Transformer hidden dimension
    nheads: int = 8  # Attention heads
    enc_layers: int = 6  # Encoder layers
    dec_layers: int = 6  # Decoder layers
    dim_feedforward: int = 2048  # Transformer FFN dimension
    dec_n_points: int = 4  # Deformable-attention sampling points (decoder)
    enc_n_points: int = 4  # Deformable-attention sampling points (encoder)
    aux_loss: bool = True  # Use auxiliary decoding losses at each decoder layer
    dilation: bool = False  # Use dilated convolution in the backbone's last stage
    train_backbone: bool = True  # Whether backbone weights are trainable
    loss_types: List[str] = field(default_factory=lambda: ['labels', 'boxes'],
                                  metadata={"description": "Losses to be used during training"})
    backbone_names: List[str] = field(default_factory=lambda: ["backbone.0"],
                                      metadata={"description": "Backbone name"})
    linear_proj_names: List[str] = field(default_factory=lambda: ['reference_points', 'sampling_offsets'],
                                         metadata={"description": "Linear Projection names"})
@dataclass
class OptimConfig:
    """Optimizer config.

    Optimizer and learning-rate scheduler hyper-parameters for training.
    """

    optimizer: str = "AdamW"  # Optimizer name
    monitor_name: str = "val_loss"  # {val_loss, train_loss}
    lr: float = 2e-4  # Base learning rate
    lr_backbone: float = 2e-5  # Learning rate for backbone parameters
    lr_linear_proj_mult: float = 0.1  # LR multiplier for linear-projection layers
    momentum: float = 0.9
    weight_decay: float = 1e-4
    lr_scheduler: str = "MultiStep"  # Scheduler type; "MultiStep" uses lr_steps below
    lr_steps: List[int] = field(default_factory=lambda: [11],  # 11, 20, 30
                                metadata={"description": "learning rate decay steps"})
    lr_step_size: int = 11  # Step size when a step scheduler is used
    lr_decay: float = 0.1  # Multiplicative LR decay factor
@dataclass
class DINOTrainExpConfig:
    """Train experiment config.

    Top-level training run parameters (GPUs, epochs, checkpointing) plus the
    nested optimizer config.
    """

    num_gpus: int = 1
    num_nodes: int = 1
    resume_training_checkpoint_path: Optional[str] = None
    pretrained_model_path: Optional[str] = None
    validation_interval: int = 1  # Validate every N epochs
    clip_grad_norm: float = 0.1
    is_dry_run: bool = False
    conf_threshold: float = 0.0
    results_dir: Optional[str] = None
    num_epochs: int = 12  # 12, 24, 36
    checkpoint_interval: int = 1  # Save a checkpoint every N epochs
    # BUGFIX: a class-level `OptimConfig()` default was a single instance shared
    # by every DINOTrainExpConfig, and Python 3.11+ dataclasses reject such
    # unhashable defaults with ValueError. `default_factory` creates one per
    # instance.
    optim: OptimConfig = field(default_factory=OptimConfig)
    precision: str = "fp32"
    distributed_strategy: str = "ddp"
    activation_checkpoint: bool = True
@dataclass
class DINOInferenceExpConfig:
    """Inference experiment config.

    Parameters for running TRT-engine inference and annotating images.
    """

    num_gpus: int = 1
    results_dir: Optional[str] = None  # Falls back to <results_dir>/trt_inference when unset
    checkpoint: Optional[str] = None  # Model checkpoint path (non-TRT path)
    trt_engine: Optional[str] = None  # Serialized TensorRT engine path
    color_map: Dict[str, str] = MISSING  # Class name -> bounding-box color (required)
    conf_threshold: float = 0.5  # Minimum confidence for drawn/saved detections
    is_internal: bool = False
    input_width: Optional[int] = None
    input_height: Optional[int] = None
@dataclass
class DINOEvalExpConfig:
    """Evaluation experiment config.

    Parameters for COCO-style evaluation with a serialized TRT engine.
    """

    num_gpus: int = 1
    results_dir: Optional[str] = None  # Falls back to <results_dir>/trt_evaluate when unset
    input_width: Optional[int] = None
    input_height: Optional[int] = None
    checkpoint: Optional[str] = None  # Model checkpoint path (non-TRT path)
    trt_engine: Optional[str] = None  # Serialized TensorRT engine path
    conf_threshold: float = 0.0  # Minimum confidence for counted detections
@dataclass
class CalibrationConfig:
    """Calibration config.

    INT8 calibration inputs for TensorRT engine generation.
    """

    cal_image_dir: List[str] = MISSING  # Directories of calibration images (required)
    cal_cache_file: str = MISSING  # Calibration cache path to read/write (required)
    cal_batch_size: int = 1  # Batch size used during calibration
    cal_batches: int = 1  # Number of calibration batches to run
@dataclass
class TrtConfig:
    """Trt config.

    TensorRT engine-builder parameters plus the nested INT8 calibration
    config.
    """

    data_type: str = "FP32"  # Engine precision ("FP32" or "FP16"; INT8 unsupported downstream)
    workspace_size: int = 1024  # Builder workspace size (MB; divided by 1024 by the caller)
    min_batch_size: int = 1  # Min batch of the dynamic-shape optimization profile
    opt_batch_size: int = 1  # Opt batch of the dynamic-shape optimization profile
    max_batch_size: int = 1  # Max batch of the dynamic-shape optimization profile
    # BUGFIX: a class-level `CalibrationConfig()` default was a single instance
    # shared by every TrtConfig, and Python 3.11+ dataclasses reject such
    # unhashable defaults with ValueError. `default_factory` creates one per
    # instance.
    calibration: CalibrationConfig = field(default_factory=CalibrationConfig)
@dataclass
class DINOExportExpConfig:
    """Export experiment config.

    Parameters for exporting a trained checkpoint to ONNX.
    """

    results_dir: Optional[str] = None
    gpu_id: int = 0  # GPU index used for export
    checkpoint: str = MISSING  # Input model checkpoint (required)
    onnx_file: str = MISSING  # Output ONNX path (required)
    on_cpu: bool = False  # Run the export on CPU instead of GPU
    input_channel: int = 3
    input_width: int = 960
    input_height: int = 544
    opset_version: int = 12  # ONNX opset version
    batch_size: int = -1  # -1 exports with a dynamic batch dimension
    verbose: bool = False
@dataclass
class DINOGenTrtEngineExpConfig:
    """Gen TRT Engine experiment config.

    Parameters for converting an exported ONNX model into a TensorRT engine.
    """

    results_dir: Optional[str] = None  # Falls back to <results_dir>/gen_trt_engine when unset
    gpu_id: int = 0  # GPU index used for engine building
    onnx_file: str = MISSING  # Input ONNX/etlt model path (required)
    trt_engine: Optional[str] = None  # Output engine path; a temp file is used when unset
    input_channel: int = 3
    input_width: int = 960
    input_height: int = 544
    opset_version: int = 12
    batch_size: int = -1  # -1 requests a dynamic-batch engine
    verbose: bool = False
    # BUGFIX: a class-level `TrtConfig()` default was a single instance shared
    # by every DINOGenTrtEngineExpConfig, and Python 3.11+ dataclasses reject
    # such unhashable defaults with ValueError. `default_factory` creates one
    # per instance.
    tensorrt: TrtConfig = field(default_factory=TrtConfig)
@dataclass
class ExperimentConfig:
    """Experiment config.

    Root Hydra schema aggregating all per-subtask configs for DINO.
    """

    # BUGFIX: the previous class-level instance defaults (e.g.
    # `model: DINOModelConfig = DINOModelConfig()`) were shared across every
    # ExperimentConfig and are rejected by Python 3.11+ dataclasses as
    # unhashable defaults (ValueError). `default_factory` creates fresh nested
    # configs per instance.
    model: DINOModelConfig = field(default_factory=DINOModelConfig)
    dataset: DINODatasetConfig = field(default_factory=DINODatasetConfig)
    train: DINOTrainExpConfig = field(default_factory=DINOTrainExpConfig)
    evaluate: DINOEvalExpConfig = field(default_factory=DINOEvalExpConfig)
    inference: DINOInferenceExpConfig = field(default_factory=DINOInferenceExpConfig)
    export: DINOExportExpConfig = field(default_factory=DINOExportExpConfig)
    gen_trt_engine: DINOGenTrtEngineExpConfig = field(default_factory=DINOGenTrtEngineExpConfig)
    encryption_key: Optional[str] = None  # Key for decrypting etlt models
    results_dir: str = MISSING  # Root output directory (required)
| tao_deploy-main | nvidia_tao_deploy/cv/dino/hydra_config/default_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DINO convert onnx model to TRT engine."""
import logging
import os
import tempfile
from nvidia_tao_deploy.cv.deformable_detr.engine_builder import DDETRDetEngineBuilder
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.common.hydra.hydra_runner import hydra_runner
from nvidia_tao_deploy.cv.dino.hydra_config.default_config import ExperimentConfig
from nvidia_tao_deploy.utils.decoding import decode_model
from nvidia_tao_deploy.engine.builder import NV_TENSORRT_MAJOR, NV_TENSORRT_MINOR, NV_TENSORRT_PATCH
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
    config_path=os.path.join(spec_root, "specs"),
    config_name="gen_trt_engine", schema=ExperimentConfig
)
@monitor_status(name='dino', mode='gen_trt_engine')
def main(cfg: ExperimentConfig) -> None:
    """Convert encrypted uff or onnx model to TRT engine.

    Args:
        cfg: Hydra-composed experiment configuration (see ExperimentConfig).
    """
    # Resolve the output directory, preferring the task-specific override.
    if cfg.gen_trt_engine.results_dir is not None:
        results_dir = cfg.gen_trt_engine.results_dir
    else:
        results_dir = os.path.join(cfg.results_dir, "gen_trt_engine")
    os.makedirs(results_dir, exist_ok=True)

    # decrypt etlt (plain onnx files pass through unchanged)
    tmp_onnx_file, file_format = decode_model(cfg.gen_trt_engine.onnx_file, cfg.encryption_key)

    engine_file = cfg.gen_trt_engine.trt_engine
    data_type = cfg.gen_trt_engine.tensorrt.data_type
    workspace_size = cfg.gen_trt_engine.tensorrt.workspace_size
    min_batch_size = cfg.gen_trt_engine.tensorrt.min_batch_size
    opt_batch_size = cfg.gen_trt_engine.tensorrt.opt_batch_size
    max_batch_size = cfg.gen_trt_engine.tensorrt.max_batch_size
    batch_size = cfg.gen_trt_engine.batch_size
    num_channels = cfg.gen_trt_engine.input_channel
    input_width = cfg.gen_trt_engine.input_width
    input_height = cfg.gen_trt_engine.input_height

    # INT8 related configs
    calib_input = list(cfg.gen_trt_engine.tensorrt.calibration.get('cal_image_dir', []))
    calib_cache = cfg.gen_trt_engine.tensorrt.calibration.get('cal_cache_file', None)
    img_std = cfg.dataset.augmentation.input_std

    # A batch size of -1 (or unset) requests a dynamic-batch engine.
    if batch_size is None or batch_size == -1:
        input_batch_size = 1
        is_dynamic = True
    else:
        input_batch_size = batch_size
        is_dynamic = False

    # TODO: Remove this when we upgrade to DLFW 23.04+
    trt_version_number = NV_TENSORRT_MAJOR * 1000 + NV_TENSORRT_MINOR * 100 + NV_TENSORRT_PATCH
    if data_type.lower() == "fp16" and trt_version_number < 8600:
        logger.warning("[WARNING]: LayerNorm has overflow issue in FP16 upto TensorRT version 8.5 "
                       "which can lead to mAP drop compared to FP32.\n"
                       "[WARNING]: Please re-export ONNX using opset 17 and use TensorRT version 8.6.\n")

    # BUGFIX: the temp-file fallback was previously dead code -- it was nested
    # inside `if engine_file is not None:`, so when no trt_engine path was
    # configured the engine was never built at all (yet success was logged).
    # Build unconditionally, falling back to a temporary path when no output
    # path is configured.
    if engine_file is None:
        engine_handle, temp_engine_path = tempfile.mkstemp()
        os.close(engine_handle)
        output_engine_path = temp_engine_path
    else:
        output_engine_path = engine_file

    builder = DDETRDetEngineBuilder(workspace=workspace_size // 1024,  # DINO config is not in GB
                                    input_dims=(input_batch_size, num_channels, input_height, input_width),
                                    is_dynamic=is_dynamic,
                                    min_batch_size=min_batch_size,
                                    opt_batch_size=opt_batch_size,
                                    max_batch_size=max_batch_size,
                                    img_std=img_std)
    builder.create_network(tmp_onnx_file, file_format)
    builder.create_engine(
        output_engine_path,
        data_type,
        calib_input=calib_input,
        calib_cache=calib_cache,
        calib_num_images=cfg.gen_trt_engine.tensorrt.calibration.cal_batch_size * cfg.gen_trt_engine.tensorrt.calibration.cal_batches,
        calib_batch_size=cfg.gen_trt_engine.tensorrt.calibration.cal_batch_size
    )

    # Use the module logger for consistency (was the root `logging` module).
    logger.info("Export finished successfully.")


if __name__ == '__main__':
    main()
| tao_deploy-main | nvidia_tao_deploy/cv/dino/scripts/gen_trt_engine.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy DINO scripts module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_deploy-main | nvidia_tao_deploy/cv/dino/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Standalone TensorRT inference."""
import os
import logging
import numpy as np
from PIL import Image
from tqdm.auto import tqdm
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.deformable_detr.inferencer import DDETRInferencer
from nvidia_tao_deploy.cv.deformable_detr.utils import post_process
from nvidia_tao_deploy.cv.dino.hydra_config.default_config import ExperimentConfig
from nvidia_tao_deploy.utils.image_batcher import ImageBatcher
from nvidia_tao_deploy.cv.common.hydra.hydra_runner import hydra_runner
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
    config_path=os.path.join(spec_root, "specs"),
    config_name="infer", schema=ExperimentConfig
)
@monitor_status(name='dino', mode='inference')
def main(cfg: ExperimentConfig) -> None:
    """DINO TRT Inference.

    Runs a serialized TensorRT engine over the configured image directory,
    saving annotated images and per-image KITTI-style label files.

    Args:
        cfg: Hydra-composed experiment configuration (see ExperimentConfig).

    Raises:
        FileNotFoundError: If the configured TensorRT engine does not exist.
    """
    if not os.path.exists(cfg.inference.trt_engine):
        raise FileNotFoundError(f"Provided inference.trt_engine at {cfg.inference.trt_engine} does not exist!")

    trt_infer = DDETRInferencer(cfg.inference.trt_engine,
                                batch_size=cfg.dataset.batch_size,
                                num_classes=cfg.dataset.num_classes)

    c, h, w = trt_infer._input_shape
    batcher = ImageBatcher(list(cfg.dataset.infer_data_sources.image_dir),
                           (cfg.dataset.batch_size, c, h, w),
                           trt_infer.inputs[0].host.dtype,
                           preprocessor="DDETR")

    # One class name per line; ids start at 1 (0 is implicitly background).
    # BUGFIX: renamed the loop variable so it no longer shadows channel `c`
    # from the input shape above, and iterate the file object directly.
    with open(cfg.dataset.infer_data_sources.classmap, "r", encoding="utf-8") as f:
        classmap = [line.rstrip() for line in f]
    classes = {cname: i + 1 for i, cname in enumerate(classmap)}

    # Create results directories
    if cfg.inference.results_dir is not None:
        results_dir = cfg.inference.results_dir
    else:
        results_dir = os.path.join(cfg.results_dir, "trt_inference")
    os.makedirs(results_dir, exist_ok=True)
    output_annotate_root = os.path.join(results_dir, "images_annotated")
    output_label_root = os.path.join(results_dir, "labels")
    os.makedirs(output_annotate_root, exist_ok=True)
    os.makedirs(output_label_root, exist_ok=True)

    # Reverse lookup: class id -> class name, used when drawing boxes.
    inv_classes = {v: k for k, v in classes.items()}

    for batches, img_paths, scales in tqdm(batcher.get_batch(), total=batcher.num_batches, desc="Producing predictions"):
        # Handle last batch as we artifically pad images for the last batch idx
        if len(img_paths) != len(batches):
            batches = batches[:len(img_paths)]

        pred_logits, pred_boxes = trt_infer.infer(batches)

        # Reconstruct each image's original size from its per-axis scale
        # factors; post_process expects [W, H, W, H] per image.
        target_sizes = []
        for batch, scale in zip(batches, scales):
            _, new_h, new_w = batch.shape
            orig_h, orig_w = int(scale[0] * new_h), int(scale[1] * new_w)
            target_sizes.append([orig_w, orig_h, orig_w, orig_h])

        class_labels, scores, boxes = post_process(pred_logits, pred_boxes, target_sizes)
        # Pack per-detection rows as [label, score, x1, y1, x2, y2].
        y_pred_valid = np.concatenate([class_labels[..., None], scores[..., None], boxes], axis=-1)

        for img_path, pred in zip(img_paths, y_pred_valid):
            # Load Image
            img = Image.open(img_path)

            # Resize of the original input image is not required for D-DETR
            # as the predictions are rescaled in post_process
            bbox_img, label_strings = trt_infer.draw_bbox(img, pred, inv_classes, cfg.inference.conf_threshold, cfg.inference.color_map)
            img_filename = os.path.basename(img_path)
            bbox_img.save(os.path.join(output_annotate_root, img_filename))

            # Store labels
            filename, _ = os.path.splitext(img_filename)
            label_file_name = os.path.join(output_label_root, filename + ".txt")
            with open(label_file_name, "w", encoding="utf-8") as f:
                f.writelines(label_strings)

    # Use the module logger for consistency (was the root `logging` module).
    logger.info("Finished inference.")


if __name__ == '__main__':
    main()
| tao_deploy-main | nvidia_tao_deploy/cv/dino/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Standalone TensorRT inference."""
import os
import operator
import copy
import logging
import json
import six
import numpy as np
from tqdm.auto import tqdm
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.common.hydra.hydra_runner import hydra_runner
from nvidia_tao_deploy.cv.deformable_detr.dataloader import DDETRCOCOLoader
from nvidia_tao_deploy.cv.deformable_detr.inferencer import DDETRInferencer
from nvidia_tao_deploy.cv.deformable_detr.utils import post_process
from nvidia_tao_deploy.cv.dino.hydra_config.default_config import ExperimentConfig
from nvidia_tao_deploy.metrics.coco_metric import EvaluationMetric
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
    config_path=os.path.join(spec_root, "specs"),
    config_name="evaluate", schema=ExperimentConfig
)
@monitor_status(name='dino', mode='evaluation')
def main(cfg: ExperimentConfig) -> None:
    """DINO TRT evaluation.

    Runs a serialized TensorRT engine over the configured COCO test set and
    reports COCO evaluation metrics, written to ``results.json``.

    Args:
        cfg: Hydra-composed experiment configuration (see ExperimentConfig).

    Raises:
        FileNotFoundError: If the configured TensorRT engine does not exist.
    """
    if not os.path.exists(cfg.evaluate.trt_engine):
        raise FileNotFoundError(f"Provided evaluate.trt_engine at {cfg.evaluate.trt_engine} does not exist!")

    eval_metric = EvaluationMetric(cfg.dataset.test_data_sources.json_file,
                                   eval_class_ids=cfg.dataset.eval_class_ids,
                                   include_mask=False)
    trt_infer = DDETRInferencer(cfg.evaluate.trt_engine,
                                batch_size=cfg.dataset.batch_size,
                                num_classes=cfg.dataset.num_classes)

    c, h, w = trt_infer._input_shape
    dl = DDETRCOCOLoader(
        val_json_file=cfg.dataset.test_data_sources.json_file,
        shape=(cfg.dataset.batch_size, c, h, w),
        dtype=trt_infer.inputs[0].host.dtype,
        batch_size=cfg.dataset.batch_size,
        data_format="channels_first",
        image_std=cfg.dataset.augmentation.input_std,
        image_dir=cfg.dataset.test_data_sources.image_dir,
        eval_samples=None)

    # Accumulators for COCO metric computation, one list of arrays per key.
    predictions = {
        'detection_scores': [],
        'detection_boxes': [],
        'detection_classes': [],
        'source_id': [],
        'image_info': [],
        'num_detections': []
    }

    def evaluation_preds(preds):
        # Essential to avoid modifying the source dict
        _preds = copy.deepcopy(preds)
        # IMPROVED: plain dict iteration replaces the py2-era six.iteritems;
        # mutating values (not keys) while iterating keys is safe.
        for k in _preds:
            _preds[k] = np.concatenate(_preds[k], axis=0)
        eval_results = eval_metric.predict_metric_fn(_preds)
        return eval_results

    for imgs, scale, source_id, labels in tqdm(dl, total=len(dl), desc="Producing predictions"):
        image = np.array(imgs)

        image_info = []
        target_sizes = []
        for i, label in enumerate(labels):
            image_info.append([label[-1][0], label[-1][1], scale[i], label[-1][2], label[-1][3]])
            # target_sizes needs to [W, H, W, H]
            target_sizes.append([label[-1][3], label[-1][2], label[-1][3], label[-1][2]])
        image_info = np.array(image_info)

        pred_logits, pred_boxes = trt_infer.infer(image)
        class_labels, scores, boxes = post_process(pred_logits, pred_boxes, target_sizes, num_select=cfg.model.num_select)
        # Convert to xywh
        boxes[:, :, 2:] -= boxes[:, :, :2]

        predictions['detection_classes'].append(class_labels)
        predictions['detection_scores'].append(scores)
        predictions['detection_boxes'].append(boxes)
        predictions['num_detections'].append(np.array([100] * cfg.dataset.batch_size).astype(np.int32))
        predictions['image_info'].append(image_info)
        predictions['source_id'].append(source_id)

    if cfg.evaluate.results_dir is not None:
        results_dir = cfg.evaluate.results_dir
    else:
        results_dir = os.path.join(cfg.results_dir, "trt_evaluate")
    os.makedirs(results_dir, exist_ok=True)

    eval_results = evaluation_preds(preds=predictions)
    for key, value in sorted(eval_results.items(), key=operator.itemgetter(0)):
        eval_results[key] = float(value)
        # Use the module logger for consistency (was the root `logging` module).
        logger.info("%s: %.9f", key, value)

    with open(os.path.join(results_dir, "results.json"), "w", encoding="utf-8") as f:
        json.dump(eval_results, f)
    logger.info("Finished evaluation.")


if __name__ == '__main__':
    main()
| tao_deploy-main | nvidia_tao_deploy/cv/dino/scripts/evaluate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entrypoint module for DINO."""
| tao_deploy-main | nvidia_tao_deploy/cv/dino/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy command line wrapper to invoke CLI scripts."""
import argparse
from nvidia_tao_deploy.cv.dino import scripts
from nvidia_tao_deploy.cv.common.entrypoint.entrypoint_hydra import get_subtasks, launch
def main():
    """Main entrypoint wrapper.

    Builds the top-level argument parser for the DINO task family, discovers
    the available subtasks from the scripts package, and dispatches to the
    selected one.
    """
    dino_parser = argparse.ArgumentParser(
        "dino",
        add_help=True,
        description="Train Adapt Optimize Deploy entrypoint for DINO"
    )
    # Discover subtasks from the scripts package and launch the chosen one.
    launch(dino_parser, get_subtasks(scripts), network="dino")


if __name__ == '__main__':
    main()
| tao_deploy-main | nvidia_tao_deploy/cv/dino/entrypoint/dino.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility class for performing TensorRT image inference."""
import logging
import numpy as np
import tensorrt as trt
from nvidia_tao_deploy.inferencer.trt_inferencer import TRTInferencer
from nvidia_tao_deploy.inferencer.utils import allocate_buffers, do_inference
logger = logging.getLogger(__name__)
class OpticalInspectionInferencer(TRTInferencer):
    """Manages TensorRT objects for model inference.

    Loads a serialized engine, records the shape of every input binding (the
    engine may have multiple inputs, e.g. the two branches of a Siamese
    network), and pre-allocates host staging buffers for repeated inference.
    """

    def __init__(self, engine_path, input_shape=None, batch_size=None, data_format="channel_first"):
        """Initializes TensorRT objects needed for model inference.

        Args:
            engine_path (str): path where TensorRT engine should be stored
            input_shape (tuple): (batch, channel, height, width) for dynamic shape engine
            batch_size (int): batch size for dynamic shape engine
            data_format (str): either channel_first or channel_last
        """
        # Load TRT engine
        logger.info("Loading engine from %s", engine_path)
        super().__init__(engine_path)
        self.execute_v2 = True

        # Record the trailing 3 dims (e.g. C, H, W) of every input binding
        # and the engine's batch dimension (taken from the last input seen).
        self._input_shape = []
        for binding in range(self.engine.num_bindings):
            if self.engine.binding_is_input(binding):
                self._input_shape.append(self.engine.get_binding_shape(binding)[-3:])
                self.max_batch_size = self.engine.get_binding_shape(binding)[0]
        for shape in self._input_shape:
            assert len(shape) == 3, "Engine doesn't have valid input dimensions"
        if data_format == "channel_first":
            self.height = self._input_shape[0][1]
            self.width = self._input_shape[0][2]
        else:
            self.height = self._input_shape[0][0]
            self.width = self._input_shape[0][1]

        # TODO: vpraveen. Temporarily disabling dynamic batch size profiling
        # till we figure out how to handle multiple inputs and fixing the
        # export routine in the pytorch container.
        # set binding_shape for dynamic input
        # for binding in range(self.engine.num_bindings):
        # if self.engine.binding_is_input(binding):
        #     binding_id = self.engine.get_binding_index(str(binding))
        #     if (input_shape is not None) or (batch_size is not None):
        #         self.context = self.engine.create_execution_context()
        #         if input_shape is not None:
        #             for idx, _input_shape in enumerate(input_shape):
        #                 self.context.set_binding_shape(binding_id, input_shape[idx])
        #                 self.max_batch_size = input_shape[idx][0]
        #         else:
        #             for idx, _input_shape in enumerate(self._input_shape):
        #                 self.context.set_binding_shape(idx, [batch_size] + list(_input_shape))
        #                 self.max_batch_size = batch_size
        #         self.execute_v2 = True

        # This allocates memory for network inputs/outputs on both CPU and GPU.
        # NOTE(review): buffers are allocated before the `self.context is None`
        # fallback below; presumably the parent class already created a context
        # on the happy path -- confirm against TRTInferencer.
        self.inputs, self.outputs, self.bindings, self.stream = allocate_buffers(self.engine,
                                                                                 self.context)
        if self.context is None:
            self.context = self.engine.create_execution_context()

        # One flattened host staging array per input binding, sized for the
        # engine's maximum batch.
        input_volumes = [trt.volume(shape) for shape in self._input_shape]
        self.numpy_array = [
            np.zeros((self.max_batch_size, volume)) for volume in input_volumes
        ]

    def infer(self, input_images):
        """Infers model on batch of same sized images resized to fit the model.

        Args:
            input_images (list): one array per input binding, each shaped
                (batch, ...) to match that binding's input dimensions.

        Returns:
            list: one flattened array per output binding, trimmed to the
                actual batch size of the last input.

        Raises:
            ValueError: if no inputs are supplied or a batch exceeds the
                engine's maximum batch size.
        """
        # BUGFIX: an empty `input_images` previously left `actual_batch_size`
        # unbound, raising NameError at the return statement below.
        if len(input_images) == 0:
            raise ValueError("input_images must contain at least one input batch.")
        # Verify if the supplied batch size is not too big
        max_batch_size = self.max_batch_size
        for idx, input_image in enumerate(input_images):
            actual_batch_size = len(input_image)
            if actual_batch_size > max_batch_size:
                raise ValueError(
                    f"image_paths list bigger ({actual_batch_size}) than"
                    f"engine max batch size ({max_batch_size})"
                )
            self.numpy_array[idx][:actual_batch_size] = input_image.reshape(actual_batch_size, -1)
            # ...copy them into appropriate place into memory...
            # (self.inputs was returned earlier by allocate_buffers())
            np.copyto(self.inputs[idx].host, self.numpy_array[idx].ravel())

        # ...fetch model outputs...
        results = do_inference(
            self.context, bindings=self.bindings, inputs=self.inputs,
            outputs=self.outputs, stream=self.stream,
            batch_size=max_batch_size,
            execute_v2=self.execute_v2)

        # ...and return results up to the actual batch size.
        return [i.reshape(max_batch_size, -1)[:actual_batch_size] for i in results]

    def __del__(self):
        """Clear things up on object deletion."""
        # BUGFIX: use getattr guards -- __del__ can run on a half-constructed
        # instance if __init__ raised, in which case these attributes may not
        # exist yet and direct access would raise AttributeError.
        if getattr(self, "trt_runtime", None):
            del self.trt_runtime
        if getattr(self, "context", None):
            del self.context
        if getattr(self, "engine", None):
            del self.engine
        if getattr(self, "stream", None):
            del self.stream
        # Loop through inputs and free inputs.
        for inp in getattr(self, "inputs", []):
            inp.device.free()
        # Loop through outputs and free them.
        for out in getattr(self, "outputs", []):
            out.device.free()
| tao_deploy-main | nvidia_tao_deploy/cv/optical_inspection/inferencer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OpticalInpsection TensorRT engine builder."""
import logging
import os
import sys
import onnx
import tensorrt as trt
from nvidia_tao_deploy.engine.builder import EngineBuilder
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
class OpticalInpsectionEngineBuilder(EngineBuilder):
"""Parses an ONNX graph and builds a TensorRT engine from it."""
def __init__(
self,
data_format="channels_first",
**kwargs
):
"""Init.
Args:
data_format (str): data_format.
"""
super().__init__(**kwargs)
self._data_format = data_format
def set_input_output_node_names(self):
"""Set input output node names."""
self._output_node_names = ["siam_pred", "208"]
self._input_node_names = ["input_1", "input_2"]
def get_onnx_input_dims(self, model_path):
"""Get input dimension of ONNX model."""
onnx_model = onnx.load(model_path)
onnx_inputs = onnx_model.graph.input
logger.info('List inputs:')
input_dims = {}
for idx, inputs in enumerate(onnx_inputs):
logger.info('Input %s -> %s.', idx, inputs.name)
logger.info('%s.', [i.dim_value for i in inputs.type.tensor_type.shape.dim][1:])
logger.info('%s.', [i.dim_value for i in inputs.type.tensor_type.shape.dim][0])
input_dims[inputs.name] = [i.dim_value for i in inputs.type.tensor_type.shape.dim][:]
return input_dims
def create_network(self, model_path, file_format="onnx"):
"""Parse the UFF/ONNX graph and create the corresponding TensorRT network definition.
Args:
model_path: The path to the UFF/ONNX graph to load.
file_format: The file format of the decrypted etlt file (default: onnx).
"""
if file_format == "onnx":
logger.info("Parsing ONNX model")
self._input_dims = self.get_onnx_input_dims(model_path)
batch_sizes = {v[0] for v in self._input_dims.values()}
assert len(batch_sizes), (
"All tensors should have the same batch size."
)
self.batch_size = list(batch_sizes)[0]
for k, v in self._input_dims.items():
self._input_dims[k] = v[1:]
network_flags = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
network_flags = network_flags | (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_PRECISION))
self.network = self.builder.create_network(network_flags)
self.parser = trt.OnnxParser(self.network, self.trt_logger)
model_path = os.path.realpath(model_path)
with open(model_path, "rb") as f:
if not self.parser.parse(f.read()):
logger.error("Failed to load ONNX file: %s", model_path)
for error in range(self.parser.num_errors):
logger.error(self.parser.get_error(error))
sys.exit(1)
inputs = [self.network.get_input(i) for i in range(self.network.num_inputs)]
outputs = [self.network.get_output(i) for i in range(self.network.num_outputs)]
logger.info("Network Description")
for input in inputs: # noqa pylint: disable=W0622
logger.info("Input '%s' with shape %s and dtype %s", input.name, input.shape, input.dtype)
for output in outputs:
logger.info("Output '%s' with shape %s and dtype %s", output.name, output.shape, output.dtype)
if self.batch_size <= 0: # dynamic batch size
logger.info("Dynamic batch size handling")
opt_profile = self.builder.create_optimization_profile()
model_input = self.network.get_input(0)
for i in range(self.network.num_inputs):
model_input = self.network.get_input(i)
input_shape = model_input.shape
input_name = model_input.name
real_shape_min = (
self.min_batch_size, input_shape[1],
input_shape[2], input_shape[3]
)
real_shape_opt = (
self.opt_batch_size, input_shape[1],
input_shape[2], input_shape[3]
)
real_shape_max = (
self.max_batch_size, input_shape[1],
input_shape[2], input_shape[3]
)
opt_profile.set_shape(
input=input_name,
min=real_shape_min,
opt=real_shape_opt,
max=real_shape_max
)
self.config.add_optimization_profile(opt_profile)
else:
logger.info("Parsing UFF model")
raise NotImplementedError("UFF for Optical Inspection is not supported")
def create_engine(self, engine_path, precision,
                  calib_input=None, calib_cache=None, calib_num_images=5000,
                  calib_batch_size=8, calib_data_file=None):
    """Build the TensorRT engine and serialize it to disk.

    Args:
        engine_path: The path where to serialize the engine to.
        precision: The datatype to use for the engine, either 'fp32', 'fp16' or 'int8'.
        calib_input: The path to a directory holding the calibration images.
        calib_cache: The path where to write the calibration cache to,
            or if it already exists, load it from.
        calib_num_images: The maximum number of images to use for calibration.
        calib_batch_size: The batch size to use for the calibration process.
        calib_data_file: Unused here; presumably kept for signature parity with
            other TAO engine builders — TODO confirm.

    Raises:
        NotImplementedError: If ``precision`` is ``"int8"`` (unsupported for
            Optical Inspection).
    """
    # Resolve to an absolute path and make sure the parent directory exists
    # before the serialized engine is written.
    engine_path = os.path.realpath(engine_path)
    engine_dir = os.path.dirname(engine_path)
    os.makedirs(engine_dir, exist_ok=True)
    logger.debug("Building %s Engine in %s", precision, engine_path)

    # No explicit batch size was configured: fall back to the calibration
    # batch size and propagate it to the builder.
    if self.batch_size is None:
        self.batch_size = calib_batch_size
        self.builder.max_batch_size = self.batch_size

    if precision == "fp16":
        if not self.builder.platform_has_fast_fp16:
            # Warn but continue: the engine is still built (in FP32 mode,
            # since the FP16 flag is only set in the else branch).
            logger.warning("FP16 is not supported natively on this platform/device")
        else:
            self.config.set_flag(trt.BuilderFlag.FP16)
    elif precision == "int8":
        raise NotImplementedError("INT8 is not supported for Optical Inspection!")

    print(f"Engine path: {engine_path}")
    # Build the engine and serialize it; both the engine and the output file
    # are released by the context managers.
    with self.builder.build_engine(self.network, self.config) as engine, \
            open(engine_path, "wb") as f:
        logger.debug("Serializing engine to file: %s", engine_path)
        f.write(engine.serialize())
| tao_deploy-main | nvidia_tao_deploy/cv/optical_inspection/engine_builder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy OpticalInpsection."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_deploy-main | nvidia_tao_deploy/cv/optical_inspection/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Optical Inspection loader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
from abc import ABC
import cv2
import numpy as np
import pandas as pd
from PIL import Image
from nvidia_tao_deploy.inferencer.preprocess_input import preprocess_input
class OpticalInspectionDataLoader(ABC):
    """Optical Inspection Dataloader.

    Iterates over (unit, golden) image pairs listed in a CSV file and yields
    preprocessed, concatenated numpy batches suitable for TensorRT inference.
    """

    def __init__(
            self,
            csv_file=None,
            transform=None,
            input_data_path=None,
            train=False,
            data_config=None,
            dtype=np.float32):
        """Initialize the Optical Inspection dataloader.

        Args:
            csv_file (str): Path to the CSV file listing the image pairs.
            transform: Optional transform callable (stored, not applied here).
            input_data_path (str): Root directory holding the input images.
            train (bool): Whether the loader is used for training.
            data_config: Dataset config providing num_input, concat_type,
                input_map, grid_map, output_shape, image_ext, batch_size and
                augmentation_config.
            dtype (np.dtype): Data type used when preprocessing images.

        Raises:
            FileNotFoundError: If ``csv_file`` does not exist.
        """
        if not os.path.exists(csv_file):
            raise FileNotFoundError(f"Inference data csv file wasn't found at {csv_file}")
        self.merged = pd.read_csv(csv_file)
        self.transform = transform
        self.input_image_root = input_data_path
        self.train = train
        self.num_inputs = data_config.num_input
        self.concat_type = data_config.concat_type
        self.input_map = data_config.input_map
        self.grid_map = data_config.grid_map
        self.output_shape = data_config.output_shape
        self.data_config = data_config
        self.ext = data_config.image_ext
        self.batch_size = data_config.batch_size
        self.dtype = dtype
        # Number of batches needed to cover every CSV row (last one may be short).
        self.n_batches = math.ceil(float(len(self.merged)) / self.batch_size)
        assert self.n_batches > 0, (
            f"There should atleast be 1 batch to load. {self.n_batches}"
        )
        self.n = 0
        if self.concat_type == "grid":
            print(
                f"Using {self.num_inputs} input and {self.concat_type} type "
                f"{self.grid_map['x']} X {self.grid_map['y']} for comparison."
            )
        else:
            print(
                f"Using {self.num_inputs} input and {self.concat_type} type "
                f"1 X {self.num_inputs} for comparison."
            )

    def __iter__(self):
        """Initialize iterator."""
        self.n = 0
        return self

    def __next__(self):
        """Return the next (unit, golden) batch of concatenated samples."""
        if self.n < self.n_batches:
            start_idx = self.batch_size * self.n
            unit_batch = []
            golden_batch = []
            # Clamp the final batch to the number of remaining rows.
            end_idx = min(start_idx + self.batch_size, len(self.merged))
            for idx in range(start_idx, end_idx):
                unit_array, golden_array = self.__getitem__(idx)
                unit_batch.append(unit_array)
                golden_batch.append(golden_array)
            self.n += 1
            return np.asarray(unit_batch, dtype=unit_array.dtype), np.asarray(golden_batch, dtype=golden_array.dtype)
        raise StopIteration

    def get_absolute_image_path(self, prefix, input_map=None):
        """Build the absolute image path ``<prefix>[_<input_map>]<ext>``.

        Raises:
            FileNotFoundError: If the resolved path does not exist.
        """
        image_path = prefix
        if input_map:
            image_path += f"_{input_map}"
        image_path += self.ext
        if not os.path.exists(image_path):
            raise FileNotFoundError(f"Image file wasn't found at {image_path}")
        return image_path

    def __getitem__(self, index):
        """Load, preprocess and concatenate the (unit, golden) pair at ``index``."""
        image_tuple = self.merged.iloc[index, :]
        image_0, image_1 = [], []
        if self.input_map:
            # One image per lighting condition / view listed in input_map.
            for input_map in self.input_map:
                image_0.append(
                    Image.open(
                        self.get_absolute_image_path(
                            self.get_unit_path(image_tuple),
                            input_map=input_map
                        )
                    )
                )
                image_1.append(
                    Image.open(
                        self.get_absolute_image_path(
                            self.get_golden_sample_path(image_tuple),
                            input_map=input_map
                        )
                    )
                )
        else:
            image_0.append(
                Image.open(
                    self.get_absolute_image_path(
                        self.get_unit_path(image_tuple)
                    )
                )
            )
            image_1.append(
                Image.open(
                    self.get_absolute_image_path(
                        self.get_golden_sample_path(image_tuple)
                    )
                )
            )
        size = (self.output_shape[0], self.output_shape[1])
        preprocessed_image_0 = self.preprocess_single_sample(
            image_0, size,
            self.data_config.augmentation_config.rgb_input_mean,
            self.data_config.augmentation_config.rgb_input_std,
            self.dtype
        )
        preprocessed_image_1 = self.preprocess_single_sample(
            image_1, size,
            self.data_config.augmentation_config.rgb_input_mean,
            self.data_config.augmentation_config.rgb_input_std,
            self.dtype
        )
        concatenated_unit_sample = self.concatenate_image(preprocessed_image_0)
        concatenated_golden_sample = self.concatenate_image(preprocessed_image_1)
        return concatenated_unit_sample, concatenated_golden_sample

    def concatenate_image(self, preprocessed_image_array):
        """Concatenate preprocessed CHW images into a single array.

        Args:
            preprocessed_image_array (list(np.ndarray)): List of (3, H, W) inputs.

        Returns:
            concatenated_image (np.ndarray): Grid or vertically stacked input.
        """
        if self.concat_type == "grid" and int(self.num_inputs) % 2 == 0:
            # Fix: the original referenced the non-existent attribute
            # `self.grix_map`, which raised AttributeError on this path.
            x, y = int(self.grid_map["x"]), int(self.grid_map["y"])
            h, w = self.output_shape[0], self.output_shape[1]
            concatenated_image = np.zeros((3, x * h, y * w))
            for idx in range(x):
                for idy in range(y):
                    # Fix: assign the whole (3, h, w) tile via slices — the
                    # original indexed a single pixel location. Row-major flat
                    # index is idx * y + idy (idx * x only worked for x == y).
                    concatenated_image[
                        :,
                        idx * h: (idx + 1) * h,
                        idy * w: (idy + 1) * w] = preprocessed_image_array[idx * y + idy]
        else:
            # Linear layout: stack the inputs vertically (along H).
            concatenated_image = np.zeros((
                3,
                self.num_inputs * self.output_shape[0],
                self.output_shape[1]))
            for idx in range(self.num_inputs):
                concatenated_image[
                    :,
                    idx * self.output_shape[0]: self.output_shape[0] * idx + self.output_shape[0],
                    :] = preprocessed_image_array[idx]
        return concatenated_image

    @staticmethod
    def preprocess_single_sample(image_array, output_shape, mean, std, dtype):
        """Resize, transpose to CHW and normalize each image in ``image_array``."""
        assert isinstance(output_shape, tuple), "Output shape must be a tuple."
        image_output = []
        for image in image_array:
            # NOTE(review): cv2.resize expects (width, height); output_shape is
            # passed as (output_shape[0], output_shape[1]) — confirm ordering
            # for non-square shapes.
            resized_image = cv2.resize(
                np.asarray(image, dtype),
                output_shape, interpolation=cv2.INTER_LINEAR)
            resized_image = np.transpose(resized_image, (2, 0, 1))
            resized_image = preprocess_input(
                resized_image,
                data_format="channels_first",
                img_mean=mean,
                img_std=std,
                mode="torch"
            )
            image_output.append(resized_image)
        return image_output

    def __len__(self):
        """Length of the dataloader in batches."""
        return self.n_batches

    def get_unit_path(self, image_tuple):
        """Get path prefix of the unit image for a CSV row."""
        image_path = os.path.join(
            self.input_image_root,
            image_tuple["input_path"],
            image_tuple["object_name"]
        )
        return image_path

    def get_golden_sample_path(self, image_tuple):
        """Get path prefix of the corresponding golden sample for a CSV row."""
        image_path = os.path.join(
            self.input_image_root,
            image_tuple["golden_path"],
            image_tuple["object_name"]
        )
        return image_path
| tao_deploy-main | nvidia_tao_deploy/cv/optical_inspection/dataloader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy OpticalInpsection config."""
| tao_deploy-main | nvidia_tao_deploy/cv/optical_inspection/config/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default config file"""
from typing import Optional, List, Dict
from dataclasses import dataclass, field
from omegaconf import MISSING
@dataclass
class OIModelConfig:
    """Optical recognition model config."""

    model_type: str = "Siamese_3"  # Model variant identifier.
    margin: float = 2.0  # Margin value; presumably the contrastive-loss margin — confirm in trainer.
    model_backbone: str = "custom"  # Feature-extractor backbone name.
    embedding_vectors: int = 5  # Size of the output embedding.
    imagenet_pretrained: bool = False  # Start from ImageNet-pretrained weights if True.
@dataclass
class OptimConfig:
    """Optimizer config."""

    type: str = "Adam"  # Optimizer name.
    lr: float = 5e-4  # Initial learning rate.
    momentum: float = 0.9  # Momentum term; presumably only used by SGD-style optimizers.
    weight_decay: float = 5e-4  # L2 regularization strength.
@dataclass
class OIAugmentationConfig:
    """Augmentation config."""

    # Per-channel RGB normalization statistics (the standard ImageNet values).
    rgb_input_mean: List[float] = field(default_factory=lambda: [0.485, 0.456, 0.406])
    rgb_input_std: List[float] = field(default_factory=lambda: [0.229, 0.224, 0.225])
@dataclass
class DataPathFormat:
    """Dataset Path experiment config."""

    csv_path: str = MISSING  # CSV file listing the dataset samples.
    images_dir: str = MISSING  # Root directory containing the images.
@dataclass
class OIDatasetConfig:
    """Dataset config."""

    # Fix: dataclass-instance defaults are evaluated once at class-definition
    # time and shared by every instance; Python 3.11+ additionally rejects
    # unhashable defaults with a ValueError. Use default_factory so each
    # config object gets its own nested instance.
    train_dataset: DataPathFormat = field(default_factory=DataPathFormat)
    validation_dataset: DataPathFormat = field(default_factory=DataPathFormat)
    test_dataset: DataPathFormat = field(default_factory=DataPathFormat)
    infer_dataset: DataPathFormat = field(default_factory=DataPathFormat)
    image_ext: Optional[str] = None  # Image file extension, e.g. ".jpg".
    batch_size: int = 32
    workers: int = 8  # Number of dataloading workers.
    fpratio_sampling: float = 0.1  # Presumably the false-positive sampling ratio — confirm.
    num_input: int = 8  # Number of lighting conditions / views per sample.
    input_map: Optional[Dict[str, int]] = None  # Suffix -> index map for the per-view images.
    grid_map: Optional[Dict[str, int]] = None  # {"x": rows, "y": cols} for grid concatenation.
    concat_type: Optional[str] = None  # "grid" or linear concatenation.
    output_shape: List[int] = field(default_factory=lambda: [100, 100])  # Per-view H, W.
    augmentation_config: OIAugmentationConfig = field(default_factory=OIAugmentationConfig)
@dataclass
class TensorBoardLogger:
    """Configuration for the tensorboard logger."""

    enabled: bool = False  # Turn TensorBoard logging on/off.
    infrequent_logging_frequency: int = 2  # Defined per epoch
@dataclass
class OITrainExpConfig:
    """Train experiment config."""

    # Fix: `OptimConfig()` / `TensorBoardLogger()` as class-level defaults are
    # shared across instances and raise ValueError on Python 3.11+; use
    # default_factory to create a fresh nested config per instance.
    optim: OptimConfig = field(default_factory=OptimConfig)
    num_epochs: int = 10
    checkpoint_interval: int = 2  # Epochs between checkpoints.
    validation_interval: int = 2  # Epochs between validation runs.
    loss: Optional[str] = None  # Loss function name.
    clip_grad_norm: float = 0.0  # 0.0 presumably disables gradient clipping — confirm.
    gpu_ids: List[int] = field(default_factory=lambda: [0])
    results_dir: Optional[str] = None  # Falls back to the global results_dir when None.
    tensorboard: Optional[TensorBoardLogger] = field(default_factory=TensorBoardLogger)
    resume_training_checkpoint_path: Optional[str] = None
    pretrained_model_path: Optional[str] = None
@dataclass
class OIInferenceExpConfig:
    """Inference experiment config."""

    checkpoint: str = MISSING  # Path to the trained model checkpoint.
    trt_engine: str = MISSING  # Path to the serialized TensorRT engine.
    gpu_id: int = 0  # GPU index used for inference.
    results_dir: Optional[str] = None  # Falls back to the global results_dir when None.
    batch_size: int = 1
@dataclass
class OIEvalExpConfig:
    """Evaluation experiment config."""

    checkpoint: str = MISSING  # Path to the trained model checkpoint.
    gpu_id: int = 0  # GPU index used for evaluation.
    batch_size: int = 1
    results_dir: Optional[str] = None  # Falls back to the global results_dir when None.
@dataclass
class OIExportExpConfig:
    """Export experiment config."""

    results_dir: Optional[str] = None  # Falls back to the global results_dir when None.
    checkpoint: str = MISSING  # Checkpoint to export.
    onnx_file: Optional[str] = None  # Output ONNX file path.
    opset_version: Optional[int] = 12  # ONNX opset to export with.
    gpu_id: int = 0
    on_cpu: bool = False  # Run the export on CPU instead of GPU.
    input_height: int = 400
    input_width: int = 100
    input_channel: int = 3
    batch_size: int = -1  # Presumably -1 means a dynamic batch dimension — confirm.
    do_constant_folding: bool = False
@dataclass
class CalibrationConfig:
    """Calibration config."""

    cal_image_dir: List[str] = MISSING  # Directories holding INT8 calibration images.
    cal_cache_file: str = MISSING  # Path of the calibration cache file.
    cal_batch_size: int = 1
    cal_batches: int = 1
@dataclass
class TrtConfig:
    """Trt config."""

    data_type: str = "fp16"  # Engine precision: "fp32", "fp16" or "int8".
    workspace_size: int = 1024  # Builder workspace size (MB).
    min_batch_size: int = 1  # Optimization-profile minimum batch size.
    opt_batch_size: int = 1  # Optimization-profile optimal batch size.
    max_batch_size: int = 1  # Optimization-profile maximum batch size.
    # Fix: `CalibrationConfig()` as a class-level default is shared across
    # instances and raises ValueError on Python 3.11+; use default_factory.
    calibration: CalibrationConfig = field(default_factory=CalibrationConfig)
@dataclass
class OIGenTrtEngineExpConfig:
    """Gen TRT Engine experiment config."""

    results_dir: Optional[str] = None  # Falls back to the global results_dir when None.
    gpu_id: int = 0
    onnx_file: str = MISSING  # Input ONNX model to convert.
    trt_engine: Optional[str] = None  # Output engine path; a temp file is used when None.
    input_channel: int = 3
    # NOTE(review): width/height here (400x100) are transposed relative to the
    # OIExportExpConfig defaults (100x400) — confirm which orientation is intended.
    input_width: int = 400
    input_height: int = 100
    opset_version: int = 12
    batch_size: int = -1  # Presumably -1 means a dynamic batch dimension — confirm.
    verbose: bool = False
    # Fix: `TrtConfig()` as a class-level default is shared across instances
    # and raises ValueError on Python 3.11+; use default_factory.
    tensorrt: TrtConfig = field(default_factory=TrtConfig)
@dataclass
class OIDatasetConvertConfig:
    """Dataset Convert experiment config."""

    root_dataset_dir: Optional[str] = None  # Root directory of the raw dataset.
    data_convert_output_dir: Optional[str] = None  # Where converted data is written.
    train_pcb_dataset_dir: Optional[str] = None
    val_pcb_dataset_dir: Optional[str] = None
    all_pcb_dataset_dir: Optional[str] = None
    golden_csv_dir: Optional[str] = None  # Directory holding the golden-sample CSVs.
    project_name: Optional[str] = None
    bot_top: Optional[str] = None  # Presumably selects board side (bottom/top) — confirm.
@dataclass
class OIExperimentConfig:
    """Experiment config."""

    # Fix: nested dataclass instances as class-level defaults are shared by
    # every experiment instance and rejected with ValueError on Python 3.11+;
    # default_factory creates fresh sub-configs per instance.
    model: OIModelConfig = field(default_factory=OIModelConfig)
    dataset: OIDatasetConfig = field(default_factory=OIDatasetConfig)
    train: OITrainExpConfig = field(default_factory=OITrainExpConfig)
    evaluate: OIEvalExpConfig = field(default_factory=OIEvalExpConfig)
    export: OIExportExpConfig = field(default_factory=OIExportExpConfig)
    inference: OIInferenceExpConfig = field(default_factory=OIInferenceExpConfig)
    dataset_convert: OIDatasetConvertConfig = field(default_factory=OIDatasetConvertConfig)
    gen_trt_engine: OIGenTrtEngineExpConfig = field(default_factory=OIGenTrtEngineExpConfig)
    encryption_key: Optional[str] = None  # Key used to decrypt encrypted (.etlt) models.
    results_dir: str = MISSING  # Global output directory.
| tao_deploy-main | nvidia_tao_deploy/cv/optical_inspection/config/default_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing default specification."""
| tao_deploy-main | nvidia_tao_deploy/cv/optical_inspection/specs/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OpticalInpsection convert etlt model to TRT engine."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import tempfile
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.optical_inspection.engine_builder import OpticalInpsectionEngineBuilder
from nvidia_tao_deploy.cv.common.hydra.hydra_runner import hydra_runner
from nvidia_tao_deploy.cv.optical_inspection.config.default_config import (
OIExperimentConfig as ExperimentConfig
)
from nvidia_tao_deploy.utils.decoding import decode_model
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
    config_path=os.path.join(spec_root, "specs"),
    config_name="experiment", schema=ExperimentConfig
)
@monitor_status(name="optical_inspection", mode="gen_trt_engine")
def main(cfg: ExperimentConfig) -> None:
    """Convert encrypted uff or onnx model to TRT engine.

    Decodes the (possibly encrypted) ONNX model, then builds and serializes a
    TensorRT engine according to the `gen_trt_engine` section of the config.

    Args:
        cfg: Hydra-resolved experiment configuration.
    """
    trt_cfg = cfg.gen_trt_engine
    # Decrypt onnx or etlt into a temporary plain-ONNX file.
    tmp_onnx_file, file_format = decode_model(trt_cfg.onnx_file, cfg['encryption_key'])

    engine_file = trt_cfg.trt_engine
    batch_size = trt_cfg.batch_size
    data_type = trt_cfg.tensorrt['data_type']
    workspace_size = trt_cfg.tensorrt['workspace_size']
    min_batch_size = trt_cfg.tensorrt['min_batch_size']
    opt_batch_size = trt_cfg.tensorrt['opt_batch_size']
    max_batch_size = trt_cfg.tensorrt['max_batch_size']

    if engine_file is None:
        # No output path configured: build into a throwaway temp file so the
        # builder can still run end-to-end.
        engine_handle, temp_engine_path = tempfile.mkstemp()
        os.close(engine_handle)
        output_engine_path = temp_engine_path
    else:
        output_engine_path = engine_file

    builder = OpticalInpsectionEngineBuilder(
        verbose=trt_cfg.verbose,
        workspace=workspace_size,
        batch_size=batch_size,
        min_batch_size=min_batch_size,
        opt_batch_size=opt_batch_size,
        max_batch_size=max_batch_size)
    builder.create_network(tmp_onnx_file, file_format)
    builder.create_engine(
        output_engine_path,
        data_type)
    # Fix: log through the module-level `logger` (the original called
    # `logging.info`, bypassing the named logger used everywhere else here).
    logger.info("Engine generation finished successfully.")


if __name__ == '__main__':
    main()
| tao_deploy-main | nvidia_tao_deploy/cv/optical_inspection/scripts/gen_trt_engine.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy OpticalInpsection scripts module."""
| tao_deploy-main | nvidia_tao_deploy/cv/optical_inspection/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Optical Inspection TensorRT inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.common.hydra.hydra_runner import hydra_runner
from nvidia_tao_deploy.cv.optical_inspection.inferencer import OpticalInspectionInferencer
from nvidia_tao_deploy.cv.optical_inspection.dataloader import OpticalInspectionDataLoader
from nvidia_tao_deploy.cv.optical_inspection.config.default_config import (
OIExperimentConfig as ExperimentConfig
)
from sklearn import metrics
from tqdm import tqdm
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
    config_path=os.path.join(spec_root, "specs"),
    config_name="experiment", schema=ExperimentConfig
)
@monitor_status(name="optical_inspection", mode="inference")
def main(cfg: ExperimentConfig) -> None:
    """Run TensorRT inference for Optical Inspection.

    Streams (unit, golden) image-pair batches through the TRT engine,
    computes the paired Euclidean distance between the two embedding outputs
    and writes one score per sample to `inference.csv`.

    Args:
        cfg: Hydra-resolved experiment configuration.
    """
    logger.info("Running inference")
    engine_file = cfg.inference.trt_engine
    batch_size = cfg.inference.batch_size
    dataset_config = cfg.dataset

    if cfg.inference.results_dir is not None:
        results_dir = cfg.inference.results_dir
    else:
        results_dir = os.path.join(cfg.results_dir, "inference")
    os.makedirs(results_dir, exist_ok=True)

    logger.info("Instantiate the optical inspection inferencer.")
    optical_inspection_inferencer = OpticalInspectionInferencer(
        engine_path=engine_file,
        batch_size=batch_size
    )

    logger.info("Instantiating the optical inspection dataloader.")
    infer_dataloader = OpticalInspectionDataLoader(
        csv_file=dataset_config.infer_dataset.csv_path,
        input_data_path=dataset_config.infer_dataset.images_dir,
        train=False,
        data_config=dataset_config,
        # Match the engine's host-buffer dtype so batches copy without casting.
        dtype=optical_inspection_inferencer.inputs[0].host.dtype
    )

    inference_score = []
    total_num_samples = len(infer_dataloader)
    # Fix: lazy %-style logging arguments instead of eager str.format —
    # consistent with the %-style used by the other log calls in this package
    # and only rendered when the level is enabled.
    logger.info("Number of sample batches: %s", total_num_samples)
    logger.info("Running inference")
    for unit_batch, golden_batch in tqdm(infer_dataloader, total=total_num_samples):
        input_batches = [
            unit_batch,
            golden_batch
        ]
        results = optical_inspection_inferencer.infer(input_batches)
        # One Euclidean distance per (unit, golden) pair in the batch.
        pairwise_output = metrics.pairwise.paired_distances(results[0], results[1], metric="euclidean")
        inference_score.extend(
            [pairwise_output[idx] for idx in range(pairwise_output.shape[0])]
        )

    logger.info("Total number of inference outputs: %s", len(inference_score))
    # Defensive truncation in case the loader yields more scores than CSV rows.
    infer_dataloader.merged["output_score"] = inference_score[:len(infer_dataloader.merged)]
    infer_dataloader.merged.to_csv(
        os.path.join(results_dir, "inference.csv"),
        header=True,
        index=False
    )


if __name__ == "__main__":
    main()
| tao_deploy-main | nvidia_tao_deploy/cv/optical_inspection/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entrypoint module for optical_inspection."""
| tao_deploy-main | nvidia_tao_deploy/cv/optical_inspection/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy command line wrapper to invoke CLI scripts."""
import argparse
from nvidia_tao_deploy.cv.optical_inspection import scripts
from nvidia_tao_deploy.cv.common.entrypoint.entrypoint_hydra import get_subtasks, launch
def main():
    """Main entrypoint wrapper.

    Builds the `optical_inspection` argparse front-end, discovers the
    available subtask scripts, and dispatches to the selected one.
    """
    # Create parser for a given task.
    parser = argparse.ArgumentParser(
        "optical_inspection",
        add_help=True,
        # Fix: corrected the "OpticalInpsection" typo in the user-facing
        # help/description string.
        description="Train Adapt Optimize Deploy entrypoint for OpticalInspection"
    )

    # Build list of subtasks by inspecting the scripts package.
    subtasks = get_subtasks(scripts)

    # Parse the arguments and launch the subtask.
    launch(
        parser,
        subtasks,
        override_results_dir="results_dir",
        override_key="encryption_key"
    )


if __name__ == '__main__':
    main()
| tao_deploy-main | nvidia_tao_deploy/cv/optical_inspection/entrypoint/optical_inspection.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy YOLOv4."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_deploy-main | nvidia_tao_deploy/cv/yolo_v4/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy YOLOv4 Proto."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_deploy-main | nvidia_tao_deploy/cv/yolo_v4/proto/__init__.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/yolo_v4/proto/augmentation_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_deploy/cv/yolo_v4/proto/augmentation_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n<nvidia_tao_deploy/cv/yolo_v4/proto/augmentation_config.proto\"\xa1\x03\n\x12\x41ugmentationConfig\x12\x0b\n\x03hue\x18\x01 \x01(\x02\x12\x12\n\nsaturation\x18\x02 \x01(\x02\x12\x10\n\x08\x65xposure\x18\x03 \x01(\x02\x12\x15\n\rvertical_flip\x18\x04 \x01(\x02\x12\x17\n\x0fhorizontal_flip\x18\x05 \x01(\x02\x12\x0e\n\x06jitter\x18\x06 \x01(\x02\x12\x14\n\x0coutput_width\x18\x07 \x01(\x05\x12\x15\n\routput_height\x18\x08 \x01(\x05\x12\x16\n\x0eoutput_channel\x18\t \x01(\x05\x12\x14\n\x0coutput_depth\x18\x0e \x01(\r\x12$\n\x1crandomize_input_shape_period\x18\n \x01(\x05\x12\x13\n\x0bmosaic_prob\x18\x0b \x01(\x02\x12\x18\n\x10mosaic_min_ratio\x18\x0c \x01(\x02\x12\x36\n\nimage_mean\x18\r \x03(\x0b\x32\".AugmentationConfig.ImageMeanEntry\x1a\x30\n\x0eImageMeanEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x02:\x02\x38\x01\x62\x06proto3')
)
_AUGMENTATIONCONFIG_IMAGEMEANENTRY = _descriptor.Descriptor(
name='ImageMeanEntry',
full_name='AugmentationConfig.ImageMeanEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='AugmentationConfig.ImageMeanEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='AugmentationConfig.ImageMeanEntry.value', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=434,
serialized_end=482,
)
_AUGMENTATIONCONFIG = _descriptor.Descriptor(
name='AugmentationConfig',
full_name='AugmentationConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='hue', full_name='AugmentationConfig.hue', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='saturation', full_name='AugmentationConfig.saturation', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='exposure', full_name='AugmentationConfig.exposure', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='vertical_flip', full_name='AugmentationConfig.vertical_flip', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='horizontal_flip', full_name='AugmentationConfig.horizontal_flip', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='jitter', full_name='AugmentationConfig.jitter', index=5,
number=6, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_width', full_name='AugmentationConfig.output_width', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_height', full_name='AugmentationConfig.output_height', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_channel', full_name='AugmentationConfig.output_channel', index=8,
number=9, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_depth', full_name='AugmentationConfig.output_depth', index=9,
number=14, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='randomize_input_shape_period', full_name='AugmentationConfig.randomize_input_shape_period', index=10,
number=10, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mosaic_prob', full_name='AugmentationConfig.mosaic_prob', index=11,
number=11, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mosaic_min_ratio', full_name='AugmentationConfig.mosaic_min_ratio', index=12,
number=12, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_mean', full_name='AugmentationConfig.image_mean', index=13,
number=13, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_AUGMENTATIONCONFIG_IMAGEMEANENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=65,
serialized_end=482,
)
_AUGMENTATIONCONFIG_IMAGEMEANENTRY.containing_type = _AUGMENTATIONCONFIG
_AUGMENTATIONCONFIG.fields_by_name['image_mean'].message_type = _AUGMENTATIONCONFIG_IMAGEMEANENTRY
DESCRIPTOR.message_types_by_name['AugmentationConfig'] = _AUGMENTATIONCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AugmentationConfig = _reflection.GeneratedProtocolMessageType('AugmentationConfig', (_message.Message,), dict(
ImageMeanEntry = _reflection.GeneratedProtocolMessageType('ImageMeanEntry', (_message.Message,), dict(
DESCRIPTOR = _AUGMENTATIONCONFIG_IMAGEMEANENTRY,
__module__ = 'nvidia_tao_deploy.cv.yolo_v4.proto.augmentation_config_pb2'
# @@protoc_insertion_point(class_scope:AugmentationConfig.ImageMeanEntry)
))
,
DESCRIPTOR = _AUGMENTATIONCONFIG,
__module__ = 'nvidia_tao_deploy.cv.yolo_v4.proto.augmentation_config_pb2'
# @@protoc_insertion_point(class_scope:AugmentationConfig)
))
_sym_db.RegisterMessage(AugmentationConfig)
_sym_db.RegisterMessage(AugmentationConfig.ImageMeanEntry)
_AUGMENTATIONCONFIG_IMAGEMEANENTRY._options = None
# @@protoc_insertion_point(module_scope)
| tao_deploy-main | nvidia_tao_deploy/cv/yolo_v4/proto/augmentation_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/yolo_v4/proto/experiment.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_deploy.cv.yolo_v4.proto import augmentation_config_pb2 as nvidia__tao__deploy_dot_cv_dot_yolo__v4_dot_proto_dot_augmentation__config__pb2
from nvidia_tao_deploy.cv.yolo_v3.proto import dataset_config_pb2 as nvidia__tao__deploy_dot_cv_dot_yolo__v3_dot_proto_dot_dataset__config__pb2
from nvidia_tao_deploy.cv.common.proto import training_config_pb2 as nvidia__tao__deploy_dot_cv_dot_common_dot_proto_dot_training__config__pb2
from nvidia_tao_deploy.cv.common.proto import eval_config_pb2 as nvidia__tao__deploy_dot_cv_dot_common_dot_proto_dot_eval__config__pb2
from nvidia_tao_deploy.cv.common.proto import nms_config_pb2 as nvidia__tao__deploy_dot_cv_dot_common_dot_proto_dot_nms__config__pb2
from nvidia_tao_deploy.cv.common.proto import class_weighting_config_pb2 as nvidia__tao__deploy_dot_cv_dot_common_dot_proto_dot_class__weighting__config__pb2
from nvidia_tao_deploy.cv.yolo_v4.proto import yolov4_config_pb2 as nvidia__tao__deploy_dot_cv_dot_yolo__v4_dot_proto_dot_yolov4__config__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_deploy/cv/yolo_v4/proto/experiment.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n3nvidia_tao_deploy/cv/yolo_v4/proto/experiment.proto\x1a<nvidia_tao_deploy/cv/yolo_v4/proto/augmentation_config.proto\x1a\x37nvidia_tao_deploy/cv/yolo_v3/proto/dataset_config.proto\x1a\x37nvidia_tao_deploy/cv/common/proto/training_config.proto\x1a\x33nvidia_tao_deploy/cv/common/proto/eval_config.proto\x1a\x32nvidia_tao_deploy/cv/common/proto/nms_config.proto\x1a>nvidia_tao_deploy/cv/common/proto/class_weighting_config.proto\x1a\x36nvidia_tao_deploy/cv/yolo_v4/proto/yolov4_config.proto\"\xca\x02\n\nExperiment\x12,\n\x0e\x64\x61taset_config\x18\x01 \x01(\x0b\x32\x14.YOLOv3DatasetConfig\x12\x30\n\x13\x61ugmentation_config\x18\x02 \x01(\x0b\x32\x13.AugmentationConfig\x12(\n\x0ftraining_config\x18\x03 \x01(\x0b\x32\x0f.TrainingConfig\x12 \n\x0b\x65val_config\x18\x04 \x01(\x0b\x32\x0b.EvalConfig\x12\x1e\n\nnms_config\x18\x05 \x01(\x0b\x32\n.NMSConfig\x12$\n\ryolov4_config\x18\x06 \x01(\x0b\x32\r.YOLOv4Config\x12\x35\n\x16\x63lass_weighting_config\x18\x08 \x01(\x0b\x32\x15.ClassWeightingConfig\x12\x13\n\x0brandom_seed\x18\x07 \x01(\rb\x06proto3')
,
dependencies=[nvidia__tao__deploy_dot_cv_dot_yolo__v4_dot_proto_dot_augmentation__config__pb2.DESCRIPTOR,nvidia__tao__deploy_dot_cv_dot_yolo__v3_dot_proto_dot_dataset__config__pb2.DESCRIPTOR,nvidia__tao__deploy_dot_cv_dot_common_dot_proto_dot_training__config__pb2.DESCRIPTOR,nvidia__tao__deploy_dot_cv_dot_common_dot_proto_dot_eval__config__pb2.DESCRIPTOR,nvidia__tao__deploy_dot_cv_dot_common_dot_proto_dot_nms__config__pb2.DESCRIPTOR,nvidia__tao__deploy_dot_cv_dot_common_dot_proto_dot_class__weighting__config__pb2.DESCRIPTOR,nvidia__tao__deploy_dot_cv_dot_yolo__v4_dot_proto_dot_yolov4__config__pb2.DESCRIPTOR,])
# Descriptor for the top-level `Experiment` message: one sub-config message
# per training concern (dataset, augmentation, training, eval, NMS, YOLOv4
# arch, class weighting) plus a uint32 `random_seed`. Generated by protoc --
# field numbers and wire types must not be hand-edited.
_EXPERIMENT = _descriptor.Descriptor(
  name='Experiment',
  full_name='Experiment',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='dataset_config', full_name='Experiment.dataset_config', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='augmentation_config', full_name='Experiment.augmentation_config', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='training_config', full_name='Experiment.training_config', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='eval_config', full_name='Experiment.eval_config', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='nms_config', full_name='Experiment.nms_config', index=4,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='yolov4_config', full_name='Experiment.yolov4_config', index=5,
      number=6, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='class_weighting_config', full_name='Experiment.class_weighting_config', index=6,
      number=8, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='random_seed', full_name='Experiment.random_seed', index=7,
      number=7, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=457,
  serialized_end=787,
)
# Resolve each sub-config field to the message type declared in its own
# generated module, then register the file descriptor and build the concrete
# `Experiment` class. Generated by protoc -- do not hand-edit.
_EXPERIMENT.fields_by_name['dataset_config'].message_type = nvidia__tao__deploy_dot_cv_dot_yolo__v3_dot_proto_dot_dataset__config__pb2._YOLOV3DATASETCONFIG
_EXPERIMENT.fields_by_name['augmentation_config'].message_type = nvidia__tao__deploy_dot_cv_dot_yolo__v4_dot_proto_dot_augmentation__config__pb2._AUGMENTATIONCONFIG
_EXPERIMENT.fields_by_name['training_config'].message_type = nvidia__tao__deploy_dot_cv_dot_common_dot_proto_dot_training__config__pb2._TRAININGCONFIG
_EXPERIMENT.fields_by_name['eval_config'].message_type = nvidia__tao__deploy_dot_cv_dot_common_dot_proto_dot_eval__config__pb2._EVALCONFIG
_EXPERIMENT.fields_by_name['nms_config'].message_type = nvidia__tao__deploy_dot_cv_dot_common_dot_proto_dot_nms__config__pb2._NMSCONFIG
_EXPERIMENT.fields_by_name['yolov4_config'].message_type = nvidia__tao__deploy_dot_cv_dot_yolo__v4_dot_proto_dot_yolov4__config__pb2._YOLOV4CONFIG
_EXPERIMENT.fields_by_name['class_weighting_config'].message_type = nvidia__tao__deploy_dot_cv_dot_common_dot_proto_dot_class__weighting__config__pb2._CLASSWEIGHTINGCONFIG
DESCRIPTOR.message_types_by_name['Experiment'] = _EXPERIMENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Experiment = _reflection.GeneratedProtocolMessageType('Experiment', (_message.Message,), dict(
  DESCRIPTOR = _EXPERIMENT,
  __module__ = 'nvidia_tao_deploy.cv.yolo_v4.proto.experiment_pb2'
  # @@protoc_insertion_point(class_scope:Experiment)
  ))
_sym_db.RegisterMessage(Experiment)
# @@protoc_insertion_point(module_scope)
| tao_deploy-main | nvidia_tao_deploy/cv/yolo_v4/proto/experiment_pb2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy Config Base Utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from google.protobuf.text_format import Merge as merge_text_proto
from nvidia_tao_deploy.cv.yolo_v4.proto.experiment_pb2 import Experiment
def load_proto(config):
    """Load the YOLOv4 experiment proto from a text-format spec file.

    Args:
        config (str): Path to a text-format experiment spec file.

    Returns:
        Experiment: The parsed experiment proto.

    Raises:
        IOError: If no file exists at the given path.
    """
    proto = Experiment()

    def _load_from_file(filename, pb2):
        if not os.path.exists(filename):
            # Bug fix: the f-string previously contained no placeholder, so the
            # error always read "Specfile not found at: (unknown)". Include the
            # actual path to make the failure actionable.
            raise IOError(f"Specfile not found at: {filename}")
        with open(filename, "r", encoding="utf-8") as f:
            merge_text_proto(f.read(), pb2)

    _load_from_file(config, proto)
    return proto
| tao_deploy-main | nvidia_tao_deploy/cv/yolo_v4/proto/utils.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/yolo_v4/proto/yolov4_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_deploy/cv/yolo_v4/proto/yolov4_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n6nvidia_tao_deploy/cv/yolo_v4/proto/yolov4_config.proto\"\x9b\x04\n\x0cYOLOv4Config\x12\x18\n\x10\x62ig_anchor_shape\x18\x01 \x01(\t\x12\x18\n\x10mid_anchor_shape\x18\x02 \x01(\t\x12\x1a\n\x12small_anchor_shape\x18\x03 \x01(\t\x12 \n\x18matching_neutral_box_iou\x18\x04 \x01(\x02\x12\x18\n\x10\x62ox_matching_iou\x18\x05 \x01(\x02\x12\x0c\n\x04\x61rch\x18\x06 \x01(\t\x12\x0f\n\x07nlayers\x18\x07 \x01(\r\x12\x18\n\x10\x61rch_conv_blocks\x18\x08 \x01(\r\x12\x17\n\x0floss_loc_weight\x18\t \x01(\x02\x12\x1c\n\x14loss_neg_obj_weights\x18\n \x01(\x02\x12\x1a\n\x12loss_class_weights\x18\x0b \x01(\x02\x12\x15\n\rfreeze_blocks\x18\x0c \x03(\x02\x12\x11\n\tfreeze_bn\x18\r \x01(\x08\x12\x12\n\nforce_relu\x18\x0e \x01(\x08\x12\x12\n\nactivation\x18\x15 \x01(\t\x12\x18\n\x10\x66ocal_loss_alpha\x18\x0f \x01(\x02\x12\x18\n\x10\x66ocal_loss_gamma\x18\x10 \x01(\x02\x12\x17\n\x0flabel_smoothing\x18\x11 \x01(\x02\x12\x1a\n\x12\x62ig_grid_xy_extend\x18\x12 \x01(\x02\x12\x1a\n\x12mid_grid_xy_extend\x18\x13 \x01(\x02\x12\x1c\n\x14small_grid_xy_extend\x18\x14 \x01(\x02\x62\x06proto3')
)
# Descriptor for `YOLOv4Config`: anchor shapes (comma-separated strings),
# loss/matching hyper-parameters, backbone settings and per-scale grid
# extensions. Generated by protoc -- field numbers/types must not be
# hand-edited.
_YOLOV4CONFIG = _descriptor.Descriptor(
  name='YOLOv4Config',
  full_name='YOLOv4Config',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='big_anchor_shape', full_name='YOLOv4Config.big_anchor_shape', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='mid_anchor_shape', full_name='YOLOv4Config.mid_anchor_shape', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='small_anchor_shape', full_name='YOLOv4Config.small_anchor_shape', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='matching_neutral_box_iou', full_name='YOLOv4Config.matching_neutral_box_iou', index=3,
      number=4, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='box_matching_iou', full_name='YOLOv4Config.box_matching_iou', index=4,
      number=5, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='arch', full_name='YOLOv4Config.arch', index=5,
      number=6, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='nlayers', full_name='YOLOv4Config.nlayers', index=6,
      number=7, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='arch_conv_blocks', full_name='YOLOv4Config.arch_conv_blocks', index=7,
      number=8, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='loss_loc_weight', full_name='YOLOv4Config.loss_loc_weight', index=8,
      number=9, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='loss_neg_obj_weights', full_name='YOLOv4Config.loss_neg_obj_weights', index=9,
      number=10, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='loss_class_weights', full_name='YOLOv4Config.loss_class_weights', index=10,
      number=11, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='freeze_blocks', full_name='YOLOv4Config.freeze_blocks', index=11,
      number=12, type=2, cpp_type=6, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='freeze_bn', full_name='YOLOv4Config.freeze_bn', index=12,
      number=13, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='force_relu', full_name='YOLOv4Config.force_relu', index=13,
      number=14, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='activation', full_name='YOLOv4Config.activation', index=14,
      number=21, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='focal_loss_alpha', full_name='YOLOv4Config.focal_loss_alpha', index=15,
      number=15, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='focal_loss_gamma', full_name='YOLOv4Config.focal_loss_gamma', index=16,
      number=16, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='label_smoothing', full_name='YOLOv4Config.label_smoothing', index=17,
      number=17, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='big_grid_xy_extend', full_name='YOLOv4Config.big_grid_xy_extend', index=18,
      number=18, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='mid_grid_xy_extend', full_name='YOLOv4Config.mid_grid_xy_extend', index=19,
      number=19, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='small_grid_xy_extend', full_name='YOLOv4Config.small_grid_xy_extend', index=20,
      number=20, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=59,
  serialized_end=598,
)
# Register the descriptor and build the concrete message class.
DESCRIPTOR.message_types_by_name['YOLOv4Config'] = _YOLOV4CONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
YOLOv4Config = _reflection.GeneratedProtocolMessageType('YOLOv4Config', (_message.Message,), dict(
  DESCRIPTOR = _YOLOV4CONFIG,
  __module__ = 'nvidia_tao_deploy.cv.yolo_v4.proto.yolov4_config_pb2'
  # @@protoc_insertion_point(class_scope:YOLOv4Config)
  ))
_sym_db.RegisterMessage(YOLOv4Config)
# @@protoc_insertion_point(module_scope)
| tao_deploy-main | nvidia_tao_deploy/cv/yolo_v4/proto/yolov4_config_pb2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""YOLOv4 convert etlt/onnx model to TRT engine."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import tempfile
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.yolo_v4.proto.utils import load_proto
from nvidia_tao_deploy.cv.yolo_v3.engine_builder import YOLOv3EngineBuilder
from nvidia_tao_deploy.utils.decoding import decode_model
# Module-wide logging format and level; `logger` is the per-module logger.
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
                    level="INFO")
logger = logging.getLogger(__name__)
# Fallback batch-size bounds for the TensorRT engine builder CLI options.
DEFAULT_MAX_BATCH_SIZE = 1
DEFAULT_MIN_BATCH_SIZE = 1
DEFAULT_OPT_BATCH_SIZE = 1
@monitor_status(name='yolo_v4', mode='gen_trt_engine')
def main(args):
    """YOLOv4 TRT convert.

    Decodes the .etlt/.onnx model, loads the experiment spec and builds a
    TensorRT engine at the requested precision.

    Args:
        args (Namespace): Parsed command line arguments.
    """
    # decrypt etlt
    tmp_onnx_file, file_format = decode_model(args.model_path, args.key)

    # Load from proto-based spec file
    es = load_proto(args.experiment_spec)

    if args.engine_file is None:
        # Bug fix: the original only created a temp path when data_type was
        # 'int8', leaving `output_engine_path` unbound (UnboundLocalError) for
        # fp32/fp16 runs without --engine_file. Always fall back to a temp file.
        engine_handle, temp_engine_path = tempfile.mkstemp()
        os.close(engine_handle)
        output_engine_path = temp_engine_path
    else:
        output_engine_path = args.engine_file

    builder = YOLOv3EngineBuilder(verbose=args.verbose,
                                  is_qat=es.training_config.enable_qat,
                                  workspace=args.max_workspace_size,
                                  min_batch_size=args.min_batch_size,
                                  opt_batch_size=args.opt_batch_size,
                                  max_batch_size=args.max_batch_size,
                                  strict_type_constraints=args.strict_type_constraints,
                                  force_ptq=args.force_ptq)
    builder.create_network(tmp_onnx_file, file_format)
    builder.create_engine(
        output_engine_path,
        args.data_type,
        calib_data_file=args.cal_data_file,
        calib_input=args.cal_image_dir,
        calib_cache=args.cal_cache_file,
        calib_num_images=args.batch_size * args.batches,
        calib_batch_size=args.batch_size,
        calib_json_file=args.cal_json_file)

    # Use the module logger (not the root logger) for consistency.
    logger.info("Export finished successfully.")
def build_command_line_parser(parser=None):
    """Construct the argparse parser for the gen_trt_engine task.

    Args:
        parser (argparse.ArgumentParser): Existing parser to extend. A fresh
            one is created when None.

    Returns:
        argparse.ArgumentParser: Parser with every gen_trt_engine option added.
    """
    if parser is None:
        parser = argparse.ArgumentParser(prog='gen_trt_engine', description='Generate TRT engine of YOLOv4 model.')
    # Required model, key and spec-file locations.
    parser.add_argument('-m', '--model_path', type=str, required=True,
                        help='Path to a YOLOv4 .etlt or .onnx model file.')
    parser.add_argument('-k', '--key', type=str, required=False,
                        help='Key to save or load a .etlt model.')
    parser.add_argument('-e', '--experiment_spec', type=str, required=True,
                        help='Path to the experiment spec file.')
    # Export precision plus the int8 calibration inputs.
    parser.add_argument("--data_type", type=str, default="fp32",
                        help="Data type for the TensorRT export.",
                        choices=["fp32", "fp16", "int8"])
    parser.add_argument("--cal_image_dir", default="", type=str,
                        help="Directory of images to run int8 calibration.")
    parser.add_argument("--cal_data_file", default=None, type=str,
                        help="Tensorfile to run calibration for int8 optimization.")
    parser.add_argument('--cal_cache_file', default=None, type=str,
                        help='Calibration cache file to write to.')
    parser.add_argument('--cal_json_file', default=None, type=str,
                        help='Dictionary containing tensor scale for QAT models.')
    # Engine output path and batch-size bounds for the builder.
    parser.add_argument("--engine_file", type=str, default=None,
                        help="Path to the exported TRT engine.")
    parser.add_argument("--max_batch_size", type=int, default=DEFAULT_MAX_BATCH_SIZE,
                        help="Max batch size for TensorRT engine builder.")
    parser.add_argument("--min_batch_size", type=int, default=DEFAULT_MIN_BATCH_SIZE,
                        help="Min batch size for TensorRT engine builder.")
    parser.add_argument("--opt_batch_size", type=int, default=DEFAULT_OPT_BATCH_SIZE,
                        help="Opt batch size for TensorRT engine builder.")
    parser.add_argument("--batch_size", type=int, default=1,
                        help="Number of images per batch.")
    parser.add_argument("--batches", type=int, default=10,
                        help="Number of batches to calibrate over.")
    parser.add_argument("--max_workspace_size", type=int, default=2,
                        help="Max memory workspace size to allow in Gb for TensorRT engine builder (default: 2).")
    # Boolean builder flags.
    parser.add_argument("-s", "--strict_type_constraints", action="store_true", default=False,
                        help="A Boolean flag indicating whether to apply the \
            TensorRT strict type constraints when building the TensorRT engine.")
    parser.add_argument("--force_ptq", action="store_true", default=False,
                        help="Flag to force post training quantization for QAT models.")
    parser.add_argument("-v", "--verbose", action="store_true", default=False,
                        help="Verbosity of the logger.")
    parser.add_argument('-r', '--results_dir', type=str, required=True, default=None,
                        help='Output directory where the log is saved.')
    return parser
def parse_command_line_arguments(args=None):
    """Parse the gen_trt_engine command line arguments."""
    return build_command_line_parser(args).parse_args(args)
if __name__ == '__main__':
    # Script entry point: parse CLI arguments and run the engine conversion.
    args = parse_command_line_arguments()
    main(args)
| tao_deploy-main | nvidia_tao_deploy/cv/yolo_v4/scripts/gen_trt_engine.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy YOLOv4 scripts module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_deploy-main | nvidia_tao_deploy/cv/yolo_v4/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Standalone TensorRT inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
from PIL import Image
import numpy as np
from tqdm.auto import tqdm
import logging
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.yolo_v3.dataloader import YOLOv3KITTILoader, aug_letterbox_resize
from nvidia_tao_deploy.cv.yolo_v3.inferencer import YOLOv3Inferencer
from nvidia_tao_deploy.cv.yolo_v4.proto.utils import load_proto
# Silence PIL's verbose debug output, then configure module-wide logging.
logging.getLogger('PIL').setLevel(logging.WARNING)
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
                    level="INFO")
logger = logging.getLogger(__name__)
@monitor_status(name='yolo_v4', mode='inference')
def main(args):
    """YOLOv4 TRT inference.

    Runs a TensorRT engine over a directory of images, writes annotated
    images and KITTI-style label files under the results directory.

    Args:
        args (Namespace): Parsed command line arguments.
    """
    trt_infer = YOLOv3Inferencer(args.model_path, batch_size=args.batch_size)

    c, h, w = trt_infer._input_shape

    # Load from proto-based spec file
    es = load_proto(args.experiment_spec)

    conf_thres = es.nms_config.confidence_threshold if es.nms_config.confidence_threshold else 0.01
    batch_size = args.batch_size if args.batch_size else es.eval_config.batch_size

    # Per-channel image mean: BGR order for color input, single value for gray.
    img_mean = es.augmentation_config.image_mean
    if c == 3:
        if img_mean:
            img_mean = [img_mean['b'], img_mean['g'], img_mean['r']]
        else:
            img_mean = [103.939, 116.779, 123.68]
    else:
        if img_mean:
            img_mean = [img_mean['l']]
        else:
            img_mean = [117.3786]

    # Override path if provided through command line args
    if args.image_dir:
        image_dirs = [args.image_dir]
    else:
        image_dirs = [d.image_directory_path for d in es.dataset_config.validation_data_sources]

    # Load mapping_dict from the spec file
    mapping_dict = dict(es.dataset_config.target_class_mapping)

    image_depth = es.augmentation_config.output_depth if es.augmentation_config.output_depth else 8

    dl = YOLOv3KITTILoader(
        shape=(c, h, w),
        image_dirs=image_dirs,
        label_dirs=[None],
        mapping_dict=mapping_dict,
        exclude_difficult=True,
        batch_size=batch_size,
        is_inference=True,
        image_mean=img_mean,
        image_depth=image_depth,
        dtype=trt_infer.inputs[0].host.dtype)

    inv_classes = {v: k for k, v in dl.classes.items()}

    if args.results_dir is None:
        results_dir = os.path.dirname(args.model_path)
    else:
        results_dir = args.results_dir
    os.makedirs(results_dir, exist_ok=True)
    output_annotate_root = os.path.join(results_dir, "images_annotated")
    output_label_root = os.path.join(results_dir, "labels")
    os.makedirs(output_annotate_root, exist_ok=True)
    os.makedirs(output_label_root, exist_ok=True)

    for i, (imgs, _) in tqdm(enumerate(dl), total=len(dl), desc="Producing predictions"):
        y_pred = trt_infer.infer(imgs)
        image_paths = dl.image_paths[np.arange(args.batch_size) + args.batch_size * i]
        # Bug fix: the original ran this per-image loop twice back-to-back; the
        # first pass only computed `y_pred_valid` and discarded it (dead code).
        # The loop variable is `j` so the batch index `i` above is not shadowed.
        for j in range(len(y_pred)):
            # Keep only detections above the NMS confidence threshold.
            y_pred_valid = y_pred[j][y_pred[j][:, 1] > conf_thres]

            target_size = np.array([w, h, w, h])
            # Scale back bounding box coordinates
            y_pred_valid[:, 2:6] *= target_size[None, :]

            # Load image
            img = Image.open(image_paths[j])

            # Handle grayscale images
            if c == 1 and image_depth == 8:
                img = img.convert('L')
            elif c == 1 and image_depth == 16:
                img = img.convert('I')

            orig_width, orig_height = img.size
            img, _, crop_coord = aug_letterbox_resize(img,
                                                      y_pred_valid[:, 2:6],
                                                      num_channels=c,
                                                      resize_shape=(trt_infer.width, trt_infer.height))
            img = Image.fromarray(img.astype('uint8'))

            # Store images
            bbox_img, label_strings = trt_infer.draw_bbox(img, y_pred_valid, inv_classes, args.threshold)
            bbox_img = bbox_img.crop((crop_coord[0], crop_coord[1], crop_coord[2], crop_coord[3]))
            bbox_img = bbox_img.resize((orig_width, orig_height))

            img_filename = os.path.basename(image_paths[j])
            bbox_img.save(os.path.join(output_annotate_root, img_filename))

            # Store labels
            filename, _ = os.path.splitext(img_filename)
            label_file_name = os.path.join(output_label_root, filename + ".txt")
            with open(label_file_name, "w", encoding="utf-8") as f:
                for l_s in label_strings:
                    f.write(l_s)

    # Use the module logger (not the root logger) for consistency.
    logger.info("Finished inference.")
def build_command_line_parser(parser=None):
    """Construct the argparse parser for the YOLOv4 inference task.

    Args:
        parser (argparse.ArgumentParser): Existing parser to extend. A fresh
            one is created when None.

    Returns:
        argparse.ArgumentParser: Parser with every inference option added.
    """
    if parser is None:
        parser = argparse.ArgumentParser(prog='infer', description='Inference with a YOLOv4 TRT model.')
    parser.add_argument('-i', '--image_dir', type=str, required=False, default=None,
                        help='Input directory of images')
    parser.add_argument('-e', '--experiment_spec', type=str, required=True,
                        help='Path to the experiment spec file.')
    parser.add_argument('-m', '--model_path', type=str, required=True,
                        help='Path to the YOLOv4 TensorRT engine.')
    parser.add_argument('-b', '--batch_size', type=int, required=False, default=1,
                        help='Batch size.')
    parser.add_argument('-r', '--results_dir', type=str, required=True, default=None,
                        help='Output directory where the log is saved.')
    parser.add_argument('-t', '--threshold', type=float, default=0.3,
                        help='Confidence threshold for inference.')
    return parser
def parse_command_line_arguments(args=None):
    """Parse the inference command line arguments."""
    return build_command_line_parser(args).parse_args(args)
if __name__ == '__main__':
    # Script entry point: parse CLI arguments and run inference.
    args = parse_command_line_arguments()
    main(args)
| tao_deploy-main | nvidia_tao_deploy/cv/yolo_v4/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Standalone TensorRT evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import json
import numpy as np
from tqdm.auto import tqdm
import logging
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.yolo_v3.dataloader import YOLOv3KITTILoader
from nvidia_tao_deploy.cv.yolo_v3.inferencer import YOLOv3Inferencer
from nvidia_tao_deploy.cv.yolo_v4.proto.utils import load_proto
from nvidia_tao_deploy.metrics.kitti_metric import KITTIMetric
logging.getLogger('PIL').setLevel(logging.WARNING)
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
@monitor_status(name='yolo_v4', mode='evaluation')
def main(args):
    """YOLOv4 TRT evaluation.

    Runs a TensorRT engine over the validation set described by the proto
    spec file, computes per-class AP and mAP with the KITTI metric, and
    writes the results to ``results.json`` in the results directory.

    Args:
        args (argparse.Namespace): parsed command line arguments.
    """
    trt_infer = YOLOv3Inferencer(args.model_path, batch_size=args.batch_size)
    c, h, w = trt_infer._input_shape
    # Load from proto-based spec file
    es = load_proto(args.experiment_spec)
    # Proto scalar fields default to 0/0.0 when unset, so falsy means "use default".
    matching_iou_threshold = es.eval_config.matching_iou_threshold if es.eval_config.matching_iou_threshold else 0.5
    conf_thres = es.nms_config.confidence_threshold if es.nms_config.confidence_threshold else 0.01
    batch_size = args.batch_size if args.batch_size else es.eval_config.batch_size
    ap_mode = es.eval_config.average_precision_mode
    ap_mode_dict = {0: "sample", 1: "integrate"}
    # Per-channel image mean: spec override, or hard-coded defaults otherwise.
    img_mean = es.augmentation_config.image_mean
    if c == 3:
        if img_mean:
            img_mean = [img_mean['b'], img_mean['g'], img_mean['r']]
        else:
            img_mean = [103.939, 116.779, 123.68]
    else:
        if img_mean:
            img_mean = [img_mean['l']]
        else:
            img_mean = [117.3786]
    # Override paths if provided through command line args
    if args.image_dir:
        image_dirs = [args.image_dir]
    else:
        image_dirs = [d.image_directory_path for d in es.dataset_config.validation_data_sources]
    if args.label_dir:
        label_dirs = [args.label_dir]
    else:
        label_dirs = [d.label_directory_path for d in es.dataset_config.validation_data_sources]
    # Load mapping_dict from the spec file
    mapping_dict = dict(es.dataset_config.target_class_mapping)
    image_depth = es.augmentation_config.output_depth if es.augmentation_config.output_depth else 8
    dl = YOLOv3KITTILoader(
        shape=(c, h, w),
        image_dirs=image_dirs,
        label_dirs=label_dirs,
        mapping_dict=mapping_dict,
        exclude_difficult=True,
        batch_size=batch_size,
        image_mean=img_mean,
        image_depth=image_depth,
        dtype=trt_infer.inputs[0].host.dtype)
    eval_metric = KITTIMetric(n_classes=len(dl.classes),
                              matching_iou_threshold=matching_iou_threshold,
                              conf_thres=conf_thres,
                              average_precision_mode=ap_mode_dict[ap_mode])
    gt_labels = []
    pred_labels = []
    # (Previously used enumerate with an index that was immediately shadowed
    # by the inner loop variable; neither index was actually needed.)
    for imgs, labels in tqdm(dl, total=len(dl), desc="Producing predictions"):
        gt_labels.extend(labels)
        y_pred = trt_infer.infer(imgs)
        # Keep only predictions whose column 1 exceeds the confidence threshold.
        for pred in y_pred:
            pred_labels.append(pred[pred[:, 1] > eval_metric.conf_thres])
    # eval_metric returns (mAP, per-class AP); the mAP is recomputed from the
    # per-class APs so the summary matches the printed per-class table.
    _, ap = eval_metric(gt_labels, pred_labels, verbose=True)
    m_ap = np.mean(ap)
    logger.info("*******************************")
    class_mapping = {v: k for k, v in dl.classes.items()}
    eval_results = {}
    for class_idx in range(len(dl.classes)):
        eval_results['AP_' + class_mapping[class_idx]] = np.float64(ap[class_idx])
        logger.info("{:<14}{:<6}{}".format(class_mapping[class_idx], 'AP', round(ap[class_idx], 5)))  # noqa pylint: disable=C0209
    logger.info("{:<14}{:<6}{}".format('', 'mAP', round(m_ap, 3)))  # noqa pylint: disable=C0209
    logger.info("*******************************")
    # Store evaluation results into JSON
    if args.results_dir is None:
        results_dir = os.path.dirname(args.model_path)
    else:
        results_dir = args.results_dir
    # Make sure the output directory exists before writing the JSON file.
    os.makedirs(results_dir, exist_ok=True)
    with open(os.path.join(results_dir, "results.json"), "w", encoding="utf-8") as f:
        json.dump(eval_results, f)
    logger.info("Finished evaluation.")
def build_command_line_parser(parser=None):
    """Build the command line parser using argparse.

    Args:
        parser (subparser): Provided from the wrapper script to build a chained
            parser mechanism.

    Returns:
        parser (argparse.ArgumentParser): parser with all evaluate options added.
    """
    if parser is None:
        parser = argparse.ArgumentParser(prog='eval', description='Evaluate with a YOLOv4 TRT model.')
    parser.add_argument(
        '-i',
        '--image_dir',
        type=str,
        required=False,
        default=None,
        help='Input directory of images')
    parser.add_argument(
        '-e',
        '--experiment_spec',
        type=str,
        required=True,
        help='Path to the experiment spec file.'
    )
    parser.add_argument(
        '-m',
        '--model_path',
        type=str,
        required=True,
        help='Path to the YOLOv4 TensorRT engine.'
    )
    parser.add_argument(
        '-l',
        '--label_dir',
        type=str,
        required=False,
        default=None,
        help='Label directory.')
    parser.add_argument(
        '-b',
        '--batch_size',
        type=int,
        required=False,
        default=1,
        help='Batch size.')
    # results_dir is optional: main() falls back to the model's directory when
    # it is None. It was previously declared required=True (with a meaningless
    # default=None), which made that fallback unreachable.
    parser.add_argument(
        '-r',
        '--results_dir',
        type=str,
        required=False,
        default=None,
        help='Output directory where the log is saved. Defaults to the model directory.'
    )
    return parser
def parse_command_line_arguments(args=None):
    """Simple function to parse command line arguments.

    Args:
        args (list): argument strings to parse; ``sys.argv[1:]`` is used
            when None.

    Returns:
        argparse.Namespace: the parsed arguments.
    """
    # Build a fresh parser. `args` is an argument list, not a parser object,
    # so it must not be forwarded as the `parser` parameter (previously it
    # was, which would crash for any non-None `args`).
    parser = build_command_line_parser()
    return parser.parse_args(args)
if __name__ == '__main__':
    # Script entrypoint: parse CLI arguments and run the evaluation.
    args = parse_command_line_arguments()
    main(args)
| tao_deploy-main | nvidia_tao_deploy/cv/yolo_v4/scripts/evaluate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entrypoint module for yolo v4."""
| tao_deploy-main | nvidia_tao_deploy/cv/yolo_v4/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy command line wrapper to invoke CLI scripts."""
import sys
from nvidia_tao_deploy.cv.common.entrypoint.entrypoint_proto import launch_job
import nvidia_tao_deploy.cv.yolo_v4.scripts
def main():
    """Launch the yolo_v4 deploy job with the CLI arguments passed to the script."""
    cli_args = sys.argv[1:]
    launch_job(nvidia_tao_deploy.cv.yolo_v4.scripts, "yolo_v4", cli_args)
if __name__ == "__main__":
    # Script entrypoint for the yolo_v4 job launcher.
    main()
| tao_deploy-main | nvidia_tao_deploy/cv/yolo_v4/entrypoint/yolo_v4.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility class for performing TensorRT image inference."""
import numpy as np
from PIL import ImageDraw
import pycocotools.mask as maskUtils
import tensorrt as trt
from nvidia_tao_deploy.cv.mask_rcnn.utils import generate_segmentation_from_masks, draw_mask_on_image_array
from nvidia_tao_deploy.inferencer.trt_inferencer import TRTInferencer
from nvidia_tao_deploy.inferencer.utils import allocate_buffers, do_inference
def trt_output_process_fn(y_pred, nms_size, mask_size, n_classes):
    """Process raw output from the TRT engine into detection and mask tensors.

    Args:
        y_pred (list): raw flat engine outputs; index 0 holds the detections,
            index 1 holds the per-class mask predictions.
        nms_size (int): number of detections kept by NMS per image.
        mask_size (int): height/width of each square mask.
        n_classes (int): number of object classes.

    Returns:
        list: ``[detections, masks]`` where detections has shape
        (-1, nms_size, 6) and masks has shape
        (-1, nms_size, n_classes, mask_size, mask_size) with negative
        values clamped to zero in place.
    """
    detections = y_pred[0].reshape((-1, nms_size, 6))
    masks = y_pred[1].reshape((-1, nms_size, n_classes, mask_size, mask_size))
    # Clamp negative mask values to zero, mutating the underlying buffer just
    # like the original boolean-mask assignment did.
    np.maximum(masks, 0, out=masks)
    return [detections, masks]
def process_prediction_for_eval(scales, box_coordinates):
    """Convert model box predictions to COCO format for evaluation.

    Boxes arrive as [y1, x1, y2, x2]; COCO expects [x1, y1, w, h], scaled by
    the per-image resize factor.

    Args:
        scales (list): per-image resize scales. May be shorter than the batch
            for the final, partially filled batch.
        box_coordinates (np.ndarray): boxes of shape (batch, num_boxes, 4).

    Returns:
        np.ndarray: converted boxes with the same shape and dtype as the input.
    """
    converted = np.zeros_like(box_coordinates)
    num_images = box_coordinates.shape[0]
    # Handle the last batch where the # of images is smaller than the batch
    # size: pad the scale list with 1.0 up to the fixed engine batch size.
    if len(scales) != num_images:
        padded_scales = [1.0] * num_images
        padded_scales[:len(scales)] = scales
        scales = padded_scales
    for img_idx in range(num_images):
        factor = scales[img_idx]
        for box_idx in range(box_coordinates.shape[1]):
            # Map [y1, x1, y2, x2] -> [x1, y1, w, h], then rescale to the
            # original image coordinates.
            y1, x1, y2, x2 = box_coordinates[img_idx, box_idx, :]
            converted[img_idx, box_idx, :] = factor * np.array(
                [x1, y1, x2 - x1, y2 - y1])
    return converted
class MRCNNInferencer(TRTInferencer):
    """Manages TensorRT objects for MaskRCNN model inference."""

    def __init__(self, engine_path, nms_size=100, n_classes=2, mask_size=28, input_shape=None, batch_size=None, data_format="channel_first"):
        """Initializes TensorRT objects needed for model inference.

        Args:
            engine_path (str): path to the serialized TensorRT engine to load
            nms_size (int): number of detections kept by NMS per image
            n_classes (int): number of object classes
            mask_size (int): height/width of each square predicted mask
            input_shape (tuple): (batch, channel, height, width) for dynamic shape engine
            batch_size (int): batch size for dynamic shape engine
            data_format (str): either channel_first or channel_last
        """
        # Load TRT engine
        super().__init__(engine_path)
        self.nms_size = nms_size
        self.n_classes = n_classes
        self.mask_size = mask_size
        self.max_batch_size = self.engine.max_batch_size
        self.execute_v2 = False
        # Execution context is needed for inference
        self.context = None
        # Allocate memory for multiple usage [e.g. multiple batch inference]
        # NOTE: if the engine has several input bindings, the last one wins.
        self._input_shape = []
        for binding in range(self.engine.num_bindings):
            if self.engine.binding_is_input(binding):
                self._input_shape = self.engine.get_binding_shape(binding)[-3:]
        assert len(self._input_shape) == 3, "Engine doesn't have valid input dimensions"
        if data_format == "channel_first":
            self.height = self._input_shape[1]
            self.width = self._input_shape[2]
        else:
            self.height = self._input_shape[0]
            self.width = self._input_shape[1]
        # set binding_shape for dynamic input; explicit-batch engines must use
        # execute_v2 for inference.
        if (input_shape is not None) or (batch_size is not None):
            self.context = self.engine.create_execution_context()
            if input_shape is not None:
                self.context.set_binding_shape(0, input_shape)
                self.max_batch_size = input_shape[0]
            else:
                self.context.set_binding_shape(0, [batch_size] + list(self._input_shape))
                self.max_batch_size = batch_size
            self.execute_v2 = True
        # This allocates memory for network inputs/outputs on both CPU and GPU
        self.inputs, self.outputs, self.bindings, self.stream = allocate_buffers(self.engine,
                                                                                 self.context)
        if self.context is None:
            self.context = self.engine.create_execution_context()
        input_volume = trt.volume(self._input_shape)
        # Host staging buffer: one flattened row per image in the batch.
        self.numpy_array = np.zeros((self.max_batch_size, input_volume))

    def infer(self, imgs, scales=None):
        """Infers model on batch of same sized images resized to fit the model.

        Args:
            imgs (np.ndarray): preprocessed image batch that will be fed into
                the model
            scales (list): per-image resize scales used to map boxes back to
                original image coordinates

        Returns:
            dict: arrays keyed by detection_classes, detection_scores,
            detection_boxes, detection_masks and num_detections.
        """
        # Verify if the supplied batch size is not too big
        max_batch_size = self.max_batch_size
        actual_batch_size = len(imgs)
        if actual_batch_size > max_batch_size:
            raise ValueError(f"image_paths list bigger ({actual_batch_size}) than \
                engine max batch size ({max_batch_size})")
        self.numpy_array[:actual_batch_size] = imgs.reshape(actual_batch_size, -1)
        # ...copy them into appropriate place into memory...
        # (self.inputs was returned earlier by allocate_buffers())
        np.copyto(self.inputs[0].host, self.numpy_array.ravel())
        # ...fetch model outputs...
        results = do_inference(
            self.context, bindings=self.bindings, inputs=self.inputs,
            outputs=self.outputs, stream=self.stream,
            batch_size=max_batch_size,
            execute_v2=self.execute_v2)
        # ...and return results up to the actual batch size.
        y_pred = [i.reshape(max_batch_size, -1)[:actual_batch_size] for i in results]
        # Process TRT outputs to proper format
        processed_outputs = trt_output_process_fn(y_pred,
                                                  n_classes=self.n_classes,
                                                  mask_size=self.mask_size,
                                                  nms_size=self.nms_size)
        detections = {}
        bs, nd, _, _, _ = processed_outputs[1].shape
        masks = np.zeros((bs, nd)).tolist()
        # For each detection, select the mask belonging to its predicted
        # class (detection column -2 holds the class index).
        for b in range(bs):
            for n in range(nd):
                class_idx = processed_outputs[0][..., -2][b, n]
                masks[b][n] = processed_outputs[1][b, n, int(class_idx), ...]  # if class_idx = -1
        masks = np.array(masks)
        bboxes = process_prediction_for_eval(scales, processed_outputs[0][..., 0:4])
        classes = np.copy(processed_outputs[0][..., -2])
        scores = np.copy(processed_outputs[0][..., -1])
        detections['detection_classes'] = classes
        detections['detection_scores'] = scores
        detections['detection_boxes'] = bboxes
        detections['detection_masks'] = masks
        # num_detections reports the fixed NMS size for every batch slot.
        detections['num_detections'] = np.array([self.nms_size] * self.max_batch_size).astype(np.int32)
        return detections

    def __del__(self):
        """Clear things up on object deletion."""
        # NOTE(review): assumes __init__ completed; if it raised early these
        # attributes may not exist — confirm whether that path is possible.
        # Clear session and buffer
        if self.trt_runtime:
            del self.trt_runtime
        if self.context:
            del self.context
        if self.engine:
            del self.engine
        if self.stream:
            del self.stream
        # Loop through inputs and free inputs.
        for inp in self.inputs:
            inp.device.free()
        # Loop through outputs and free them.
        for out in self.outputs:
            out.device.free()

    def draw_bbox_and_segm(self, img, classes, scores, bboxes, masks, class_mapping, threshold=0.3):
        """Draws bounding box and segmentation on image and dump prediction in KITTI format

        Args:
            img (PIL.Image): Preprocessed image (passed to ImageDraw.Draw)
            classes (numpy.ndarray): (N x 100) predictions
            scores (numpy.ndarray): (N x 100) predictions
            bboxes (numpy.ndarray): (N x 100 x 4) predictions in [x1, y1, w, h]
            masks (numpy.ndarray): (N x 100 x mask_height x mask_width) predictions
            class_mapping (dict): key is the class index and value is the class string
            threshold (float): value to filter predictions

        Returns:
            img (PIL.Image): image with boxes and mask overlays drawn
            label_strings (list): one COCO-style annotation dict per kept detection
        """
        draw = ImageDraw.Draw(img)
        color_list = ['Black', 'Red', 'Blue', 'Gold', 'Purple']
        label_strings = []
        for idx, (cls, score, bbox, mask) in enumerate(zip(classes, scores, bboxes, masks)):
            cls_name = class_mapping[int(cls)]
            if float(score) < threshold:
                continue
            # bbox comes from process_prediction_for_eval, i.e. [x1, y1, w, h].
            x1, y1, w, h = bbox
            x2, y2 = x1 + w, y1 + h
            draw.rectangle(((x1, y1), (x2, y2)),
                           outline=color_list[int(cls) % len(color_list)])
            # txt pad
            draw.rectangle(((x1, y1), (x1 + 75, y1 + 10)),
                           fill=color_list[int(cls) % len(color_list)])
            draw.text((x1, y1), f"{cls_name}: {score:.2f}")
            # Overlay segmentations: paste the fixed-size mask into the full
            # image canvas and alpha-blend it on top.
            mask = np.expand_dims(mask, axis=0)
            detected_bbox = np.expand_dims(bbox, axis=0)
            segms = generate_segmentation_from_masks(
                mask, detected_bbox,
                image_width=self.width,
                image_height=self.height,
                is_image_mask=False)
            segms = segms[0, :, :]
            img = draw_mask_on_image_array(img, segms, color=color_list[int(cls) % len(color_list)], alpha=0.4)
            draw = ImageDraw.Draw(img)
            # Dump labels
            json_obj = {}
            # NOTE(review): with bbox in [x1, y1, w, h] form these compute
            # (h - y1) and (w - x1) rather than the box height/width; the code
            # looks like it assumes [y1, x1, y2, x2] here — confirm intended.
            hhh, www = bbox[3] - bbox[1], bbox[2] - bbox[0]
            json_obj['area'] = int(www * hhh)
            json_obj['is_crowd'] = 0
            json_obj['bbox'] = [int(bbox[1]), int(bbox[0]), int(hhh), int(www)]
            json_obj['id'] = idx
            json_obj['category_id'] = int(cls)
            json_obj['score'] = float(score)
            # use RLE
            encoded_mask = maskUtils.encode(
                np.asfortranarray(segms.astype(np.uint8)))
            encoded_mask['counts'] = encoded_mask['counts'].decode('ascii')
            json_obj["segmentation"] = encoded_mask
            label_strings.append(json_obj)
        return img, label_strings
| tao_deploy-main | nvidia_tao_deploy/cv/mask_rcnn/inferencer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MRCNN TensorRT engine builder."""
import logging
import os
import random
from six.moves import xrange
import sys
import traceback
from tqdm import tqdm
# Import MetaGraph for reading input dimensions out of a serialized UFF model.
try:
    from uff.model.uff_pb2 import MetaGraph
except ImportError:
    print("Loading uff directly from the package source code")
    # @scha: To disable tensorflow import issue
    # Importing the top-level `uff` package pulls in TensorFlow; instead,
    # locate the installed package on disk and load only the generated
    # protobuf module (model/uff_pb2.py) directly.
    import importlib
    import types
    import pkgutil
    package = pkgutil.get_loader("uff")
    # Returns __init__.py path
    src_code = package.get_filename().replace('__init__.py', 'model/uff_pb2.py')
    loader = importlib.machinery.SourceFileLoader('helper', src_code)
    helper = types.ModuleType(loader.name)
    loader.exec_module(helper)
    MetaGraph = helper.MetaGraph
import numpy as np
import tensorrt as trt
from nvidia_tao_deploy.engine.builder import EngineBuilder
from nvidia_tao_deploy.engine.tensorfile import TensorFile
from nvidia_tao_deploy.engine.tensorfile_calibrator import TensorfileCalibrator
from nvidia_tao_deploy.engine.utils import generate_random_tensorfile, prepare_chunk
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
class MRCNNEngineBuilder(EngineBuilder):
    """Parses an UFF graph and builds a TensorRT engine from it."""

    def __init__(
        self,
        data_format="channels_first",
        **kwargs
    ):
        """Init.

        Args:
            data_format (str): data_format, either "channels_first" or
                "channels_last".
        """
        super().__init__(**kwargs)
        self._data_format = data_format

    def set_input_output_node_names(self):
        """Set the fixed MaskRCNN input/output node names used by the UFF parser."""
        self._output_node_names = ["generate_detections", "mask_fcn_logits/BiasAdd"]
        self._input_node_names = ["Input"]

    def get_input_dims(self, model_path):
        """Get input dimension of UFF model.

        Args:
            model_path (str): path to the serialized UFF model.

        Returns:
            np.ndarray: input shape without the leading batch dimension.

        Raises:
            ValueError: if no Input node is found in the UFF metagraph.
        """
        metagraph = MetaGraph()
        with open(model_path, "rb") as f:
            metagraph.ParseFromString(f.read())
        for node in metagraph.graphs[0].nodes:
            if node.operation == "Input":
                # Drop the leading batch dimension.
                return np.array(node.fields['shape'].i_list.val)[1:]
        raise ValueError("Input dimension is not found in the UFF metagraph.")

    def create_network(self, model_path, file_format="uff"):
        """Parse the UFF graph and create the corresponding TensorRT network definition.

        Args:
            model_path (str): The path to the UFF graph to load.
            file_format (str): only "uff" is supported; anything else raises.

        Raises:
            AssertionError: if the UFF parser fails on the model file.
            NotImplementedError: for any non-UFF file format.
        """
        if file_format == "uff":
            logger.info("Parsing UFF model")
            self.network = self.builder.create_network()
            self.parser = trt.UffParser()
            self.set_input_output_node_names()
            in_tensor_name = self._input_node_names[0]
            self._input_dims = self.get_input_dims(model_path)
            input_dict = {in_tensor_name: self._input_dims}
            for key, value in input_dict.items():
                # UffInputOrder(0) is NCHW, UffInputOrder(1) is NHWC.
                if self._data_format == "channels_first":
                    self.parser.register_input(key, value, trt.UffInputOrder(0))
                else:
                    self.parser.register_input(key, value, trt.UffInputOrder(1))
            for name in self._output_node_names:
                self.parser.register_output(name)
            self.builder.max_batch_size = self.max_batch_size
            try:
                assert self.parser.parse(model_path, self.network, trt.DataType.FLOAT)
            except AssertionError as e:
                logger.error("Failed to parse UFF File")
                _, _, tb = sys.exc_info()
                traceback.print_tb(tb)  # Fixed format
                tb_info = traceback.extract_tb(tb)
                _, line, _, text = tb_info[-1]
                raise AssertionError(
                    f"UFF parsing failed on line {line} in statement {text}"
                ) from e
        else:
            # NOTE(review): this branch logs "Parsing UFF model" and raises a
            # message mentioning "UFF for Faster RCNN" — both look copy-pasted
            # from another builder; presumably they should refer to the
            # unsupported non-UFF format for MaskRCNN. Confirm and fix.
            logger.info("Parsing UFF model")
            raise NotImplementedError("UFF for Faster RCNN is not supported")

    def set_calibrator(self,
                       inputs=None,
                       calib_cache=None,
                       calib_input=None,
                       calib_num_images=5000,
                       calib_batch_size=8,
                       calib_data_file=None,
                       image_mean=None):
        """Simple function to set an Tensorfile based int8 calibrator.

        Args:
            inputs: unused here; presumably kept for interface compatibility
                with the base class — TODO confirm.
            calib_data_file: Path to the TensorFile. If the tensorfile doesn't exist
                at this path, then one is created with either n_batches
                of random tensors, images from the file in calib_input of dimensions
                (batch_size,) + (input_dims).
            calib_input: The path to a directory holding the calibration images.
            calib_cache: The path where to write the calibration cache to,
                or if it already exists, load it from.
            calib_num_images: The maximum number of images to use for calibration.
            calib_batch_size: The batch size to use for the calibration process.
            image_mean: Image mean per channel.

        Returns:
            No explicit returns.
        """
        logger.info("Calibrating using TensorfileCalibrator")
        n_batches = calib_num_images // calib_batch_size
        if not os.path.exists(calib_data_file):
            self.generate_tensor_file(calib_data_file,
                                      calib_input,
                                      self._input_dims,
                                      n_batches=n_batches,
                                      batch_size=calib_batch_size,
                                      image_mean=image_mean)
        self.config.int8_calibrator = TensorfileCalibrator(calib_data_file,
                                                           calib_cache,
                                                           n_batches,
                                                           calib_batch_size)

    def generate_tensor_file(self, data_file_name,
                             calibration_images_dir,
                             input_dims, n_batches=10,
                             batch_size=1, image_mean=None):
        """Generate calibration Tensorfile for int8 calibrator.

        This function generates a calibration tensorfile from a directory of images, or dumps
        n_batches of random numpy arrays of shape (batch_size,) + (input_dims).

        Args:
            data_file_name (str): Path to the output tensorfile to be saved.
            calibration_images_dir (str): Path to the images to generate a tensorfile from.
            input_dims (list): Input shape in CHW order.
            n_batches (int): Number of batches to be saved.
            batch_size (int): Number of images per batch.
            image_mean (list): Image mean per channel.

        Returns:
            No explicit returns.
        """
        if not os.path.exists(calibration_images_dir):
            logger.info("Generating a tensorfile with random tensor images. This may work well as "
                        "a profiling tool, however, it may result in inaccurate results at "
                        "inference. Please generate a tensorfile using the tlt-int8-tensorfile, "
                        "or provide a custom directory of images for best performance.")
            generate_random_tensorfile(data_file_name,
                                       input_dims,
                                       n_batches=n_batches,
                                       batch_size=batch_size)
        else:
            # Preparing the list of images to be saved.
            num_images = n_batches * batch_size
            valid_image_ext = ['jpg', 'jpeg', 'png']
            image_list = [os.path.join(calibration_images_dir, image)
                          for image in os.listdir(calibration_images_dir)
                          if image.split('.')[-1] in valid_image_ext]
            if len(image_list) < num_images:
                raise ValueError('Not enough number of images provided:'
                                 f' {len(image_list)} < {num_images}')
            # Random subset of image indices, then written out batch by batch.
            image_idx = random.sample(xrange(len(image_list)), num_images)
            self.set_data_preprocessing_parameters(input_dims, image_mean)
            # Writing out processed dump.
            with TensorFile(data_file_name, 'w') as f:
                for chunk in tqdm(image_idx[x:x + batch_size] for x in xrange(0, len(image_idx),
                                                                              batch_size)):
                    dump_data = prepare_chunk(chunk, image_list,
                                              image_width=input_dims[2],
                                              image_height=input_dims[1],
                                              channels=input_dims[0],
                                              batch_size=batch_size,
                                              **self.preprocessing_arguments)
                    f.write(dump_data)
            # NOTE(review): `f.closed` is a no-op attribute access (not a
            # call); the `with` block above already closes the file.
            f.closed

    def set_data_preprocessing_parameters(self, input_dims, image_mean=None):
        """Set data pre-processing parameters for the int8 calibration.

        Args:
            input_dims (list): Input shape in CHW order; only 3-channel input
                is supported.
            image_mean (list): unused here; the hard-coded ImageNet means are
                always applied — TODO confirm whether this is intended.

        Raises:
            NotImplementedError: if the channel count is not 3.
        """
        num_channels = input_dims[0]
        if num_channels == 3:
            means = [123.675, 116.280, 103.53]
        else:
            raise NotImplementedError(f"Invalid number of dimensions {num_channels}.")
        # ([R, G, B]/ 255 - [0.485, 0.456, 0.406]) / 0.224
        # (R/G/B - mean) * ratio, where 0.017507 ~= 1 / (255 * 0.224)
        self.preprocessing_arguments = {"scale": 0.017507,
                                        "means": means,
                                        "flip_channel": False}
| tao_deploy-main | nvidia_tao_deploy/cv/mask_rcnn/engine_builder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy MRCNN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_deploy-main | nvidia_tao_deploy/cv/mask_rcnn/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions used for mask visualization."""
import cv2
import numpy as np
from PIL import Image
from PIL import ImageColor
def draw_mask_on_image_array(pil_image, mask, color='red', alpha=0.4):
    """Blend a binary mask onto an image and return the composited result.

    Args:
        pil_image (PIL.Image): image of size (img_width, img_height).
        mask (np.ndarray): uint8 array of shape (img_height, img_width) with
            values of either 0 or 1.
        color (str): color name used for the mask overlay. Default is red.
        alpha (float): transparency value between 0 and 1. (default: 0.4)

    Returns:
        PIL.Image: a new image with the mask drawn on top; the input image
        object itself is left unmodified, so callers must use the return value.
    """
    rgb_values = ImageColor.getrgb(color)
    # Solid color plane the same size as the mask, broadcast to 3 channels.
    color_plane = np.expand_dims(np.ones_like(mask), axis=2) * np.reshape(list(rgb_values), [1, 1, 3])
    overlay = Image.fromarray(np.uint8(color_plane)).convert('RGBA')
    # The mask scaled by alpha becomes the per-pixel blend weight.
    blend_weights = Image.fromarray(np.uint8(255.0 * alpha * mask)).convert('L')
    return Image.composite(overlay, pil_image, blend_weights)
def generate_segmentation_from_masks(masks,
                                     detected_boxes,
                                     image_height,
                                     image_width,
                                     is_image_mask=False):
    """Generates segmentation result from instance masks.

    Args:
        masks: a numpy array of shape [N, mask_height, mask_width] representing the
            instance masks w.r.t. the `detected_boxes`.
        detected_boxes: a numpy array of shape [N, 4] representing the reference
            bounding boxes. The expected format is xywh.
        image_height: an integer representing the height of the image.
        image_width: an integer representing the width of the image.
        is_image_mask: bool. True: input masks are whole-image masks. False: input
            masks are bounding-box level masks.

    Returns:
        segms: a numpy array of shape [N, image_height, image_width] representing
            the instance masks *pasted* on the image canvas.
    """
    def expand_boxes(boxes, scale):
        """Expands an array of boxes by a given scale."""
        # Reference:
        # https://github.com/facebookresearch/Detectron/blob/master/detectron/utils/boxes.py#L227
        # The `boxes` in the reference implementation is in [x1, y1, x2, y2] form,
        # whereas `boxes` here is in [x1, y1, w, h] form
        w_half = boxes[:, 2] * .5
        h_half = boxes[:, 3] * .5
        x_c = boxes[:, 0] + w_half
        y_c = boxes[:, 1] + h_half
        w_half *= scale
        h_half *= scale
        # Output is in [x1, y1, x2, y2] form.
        boxes_exp = np.zeros(boxes.shape)
        boxes_exp[:, 0] = x_c - w_half
        boxes_exp[:, 2] = x_c + w_half
        boxes_exp[:, 1] = y_c - h_half
        boxes_exp[:, 3] = y_c + h_half
        return boxes_exp

    # Reference:
    # https://github.com/facebookresearch/Detectron/blob/master/detectron/core/test.py#L812
    # To work around an issue with cv2.resize (it seems to automatically pad
    # with repeated border values), we manually zero-pad the masks by 1 pixel
    # prior to resizing back to the original image resolution. This prevents
    # "top hat" artifacts. We therefore need to expand the reference boxes by an
    # appropriate factor.
    _, mask_height, mask_width = masks.shape
    scale = max((mask_width + 2.0) / mask_width,
                (mask_height + 2.0) / mask_height)
    ref_boxes = expand_boxes(detected_boxes, scale)
    ref_boxes = ref_boxes.astype(np.int32)
    # Buffer allocated once and reused for every mask: the interior is
    # overwritten each iteration and the 1-pixel border stays zero.
    padded_mask = np.zeros((mask_height + 2, mask_width + 2), dtype=np.float32)
    segms = []
    for mask_ind, mask in enumerate(masks):
        im_mask = np.zeros((image_height, image_width), dtype=np.uint8)
        if is_image_mask:
            # Process whole-image masks.
            im_mask[:, :] = mask[:, :]
        else:
            # Process mask inside bounding boxes.
            padded_mask[1:-1, 1:-1] = mask[:, :]
            ref_box = ref_boxes[mask_ind, :]
            # +1 converts the box corners to an inclusive pixel count.
            w = ref_box[2] - ref_box[0] + 1
            h = ref_box[3] - ref_box[1] + 1
            w = np.maximum(w, 1)
            h = np.maximum(h, 1)
            # Resize to box size, then binarize at 0.5.
            mask = cv2.resize(padded_mask, (w, h))
            mask = np.array(mask > 0.5, dtype=np.uint8)
            # Clip the paste region to the image canvas and copy the
            # corresponding sub-window of the resized mask.
            x_0 = max(ref_box[0], 0)
            x_1 = min(ref_box[2] + 1, image_width)
            y_0 = max(ref_box[1], 0)
            y_1 = min(ref_box[3] + 1, image_height)
            im_mask[y_0:y_1, x_0:x_1] = \
                mask[(y_0 - ref_box[1]):(y_1 - ref_box[1]), (x_0 - ref_box[0]):(x_1 - ref_box[0])]
        segms.append(im_mask)
    segms = np.array(segms)
    assert masks.shape[0] == segms.shape[0]
    return segms
| tao_deploy-main | nvidia_tao_deploy/cv/mask_rcnn/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MRCNN loader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from PIL import Image
from nvidia_tao_deploy.dataloader.coco import COCOLoader
from nvidia_tao_deploy.inferencer.preprocess_input import preprocess_input
class MRCNNCOCOLoader(COCOLoader):
    """MRCNN DataLoader."""

    def preprocess_image(self, image_path):
        """The image preprocessor loads an image from disk and prepares it as needed for batching.

        This includes padding, resizing, normalization, data type casting, and transposing.
        This Image Batcher implements one algorithm for now:
        * MRCNN: Resizes and pads the image to fit the input size.

        Args:
            image_path (str): The path to the image on disk to load.

        Returns:
            image (np.array): A numpy array holding the image sample, ready to be concatenated
                into the rest of the batch
            scale (float): the resize scale used.
        """
        # NOTE(review): self.width / self.height / self.dtype / self.data_format
        # are presumably set by the COCOLoader base class — confirm there.
        def resize_pad(image, pad_color=(0, 0, 0)):
            """Resize and Pad.

            A subroutine to implement padding and resizing. This will resize the image to fit
            fully within the input size, and pads the remaining bottom-right portions with
            the value provided.

            Args:
                image (PIL.Image): The PIL image object
                pad_color (list): The RGB values to use for the padded area. Default: Black/Zeros.

            Returns:
                pad (PIL.Image): The PIL image object already padded and cropped,
                scale (float): the resize scale used.
            """
            width, height = image.size
            width_scale = width / self.width
            height_scale = height / self.height
            # Uniform scale that fits the whole image inside the network input.
            scale = 1.0 / max(width_scale, height_scale)
            image = image.resize(
                (round(width * scale), round(height * scale)),
                resample=Image.BILINEAR)
            # Fill the canvas with the pad color, then paste the resized image
            # at the top-left; the remaining bottom/right area keeps the fill.
            pad = Image.new("RGB", (self.width, self.height))
            pad.paste(pad_color, [0, 0, self.width, self.height])
            pad.paste(image)
            return pad, scale

        scale = None
        image = Image.open(image_path)
        image = image.convert(mode='RGB')
        # Pad with the per-channel mean color (124, 116, 104), not zeros.
        image, scale = resize_pad(image, (124, 116, 104))
        image = np.asarray(image, dtype=self.dtype)
        if self.data_format == "channels_first":
            image = np.transpose(image, (2, 0, 1))
        # Normalize with torch-style per-channel image mean and std.
        image = preprocess_input(image, data_format=self.data_format, mode='torch')
        return image, scale
| tao_deploy-main | nvidia_tao_deploy/cv/mask_rcnn/dataloader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy MRCNN Proto."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_deploy-main | nvidia_tao_deploy/cv/mask_rcnn/proto/__init__.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/mask_rcnn/proto/experiment.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_deploy.cv.mask_rcnn.proto import maskrcnn_config_pb2 as nvidia__tao__deploy_dot_cv_dot_mask__rcnn_dot_proto_dot_maskrcnn__config__pb2
from nvidia_tao_deploy.cv.mask_rcnn.proto import data_config_pb2 as nvidia__tao__deploy_dot_cv_dot_mask__rcnn_dot_proto_dot_data__config__pb2
# Generated descriptor wiring for experiment.proto (legacy protobuf codegen
# style). The serialized_pb blob is the compiled FileDescriptorProto; the
# hand-listed FieldDescriptors below must stay in sync with it. Do not edit
# by hand -- regenerate from the .proto file instead.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='nvidia_tao_deploy/cv/mask_rcnn/proto/experiment.proto',
  package='',
  syntax='proto3',
  serialized_options=None,
  serialized_pb=_b('\n5nvidia_tao_deploy/cv/mask_rcnn/proto/experiment.proto\x1a:nvidia_tao_deploy/cv/mask_rcnn/proto/maskrcnn_config.proto\x1a\x36nvidia_tao_deploy/cv/mask_rcnn/proto/data_config.proto\"\xd1\x05\n\nExperiment\x12(\n\x0fmaskrcnn_config\x18\x01 \x01(\x0b\x32\x0f.MaskRCNNConfig\x12 \n\x0b\x64\x61ta_config\x18\x02 \x01(\x0b\x32\x0b.DataConfig\x12!\n\x19skip_checkpoint_variables\x18\x03 \x01(\t\x12\x18\n\x10train_batch_size\x18\x05 \x01(\r\x12\x1e\n\x16save_checkpoints_steps\x18\x06 \x01(\r\x12\x1a\n\x12num_steps_per_eval\x18\x07 \x01(\r\x12\x10\n\x08momentum\x18\x08 \x01(\x02\x12\x17\n\x0fl2_weight_decay\x18\n \x01(\x02\x12\x1c\n\x14warmup_learning_rate\x18\x0b \x01(\x02\x12\x1a\n\x12init_learning_rate\x18\x0c \x01(\x02\x12\"\n\x1aglobal_gradient_clip_ratio\x18\r \x01(\x02\x12\x13\n\x0btotal_steps\x18\x0e \x01(\r\x12 \n\x18visualize_images_summary\x18\x0f \x01(\x08\x12\x12\n\ncheckpoint\x18\x13 \x01(\t\x12\x17\n\x0f\x65val_batch_size\x18\x14 \x01(\r\x12\x14\n\x0cwarmup_steps\x18\x15 \x01(\r\x12\x1b\n\x13learning_rate_steps\x18\x16 \x01(\t\x12\"\n\x1alearning_rate_decay_levels\x18\x17 \x01(\t\x12\x0c\n\x04seed\x18\x18 \x01(\r\x12\x18\n\x10report_frequency\x18\x19 \x01(\r\x12\x0f\n\x07use_amp\x18\x1a \x01(\x08\x12\x19\n\x11pruned_model_path\x18\x1b \x01(\t\x12\x17\n\x0fl1_weight_decay\x18\x1c \x01(\x02\x12\x12\n\nnum_epochs\x18\x1d \x01(\r\x12\x1e\n\x16num_examples_per_epoch\x18\x1e \x01(\r\x12\x19\n\x11logging_frequency\x18\x1f \x01(\rb\x06proto3')
  ,
  dependencies=[nvidia__tao__deploy_dot_cv_dot_mask__rcnn_dot_proto_dot_maskrcnn__config__pb2.DESCRIPTOR,nvidia__tao__deploy_dot_cv_dot_mask__rcnn_dot_proto_dot_data__config__pb2.DESCRIPTOR,])
# Descriptor for the top-level Experiment message. Field numbers/types here
# mirror experiment.proto exactly (note field number 4 and 16-18 are unused).
_EXPERIMENT = _descriptor.Descriptor(
  name='Experiment',
  full_name='Experiment',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='maskrcnn_config', full_name='Experiment.maskrcnn_config', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='data_config', full_name='Experiment.data_config', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='skip_checkpoint_variables', full_name='Experiment.skip_checkpoint_variables', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='train_batch_size', full_name='Experiment.train_batch_size', index=3,
      number=5, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='save_checkpoints_steps', full_name='Experiment.save_checkpoints_steps', index=4,
      number=6, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='num_steps_per_eval', full_name='Experiment.num_steps_per_eval', index=5,
      number=7, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='momentum', full_name='Experiment.momentum', index=6,
      number=8, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='l2_weight_decay', full_name='Experiment.l2_weight_decay', index=7,
      number=10, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='warmup_learning_rate', full_name='Experiment.warmup_learning_rate', index=8,
      number=11, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='init_learning_rate', full_name='Experiment.init_learning_rate', index=9,
      number=12, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='global_gradient_clip_ratio', full_name='Experiment.global_gradient_clip_ratio', index=10,
      number=13, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='total_steps', full_name='Experiment.total_steps', index=11,
      number=14, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='visualize_images_summary', full_name='Experiment.visualize_images_summary', index=12,
      number=15, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='checkpoint', full_name='Experiment.checkpoint', index=13,
      number=19, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='eval_batch_size', full_name='Experiment.eval_batch_size', index=14,
      number=20, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='warmup_steps', full_name='Experiment.warmup_steps', index=15,
      number=21, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='learning_rate_steps', full_name='Experiment.learning_rate_steps', index=16,
      number=22, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='learning_rate_decay_levels', full_name='Experiment.learning_rate_decay_levels', index=17,
      number=23, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='seed', full_name='Experiment.seed', index=18,
      number=24, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='report_frequency', full_name='Experiment.report_frequency', index=19,
      number=25, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='use_amp', full_name='Experiment.use_amp', index=20,
      number=26, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='pruned_model_path', full_name='Experiment.pruned_model_path', index=21,
      number=27, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='l1_weight_decay', full_name='Experiment.l1_weight_decay', index=22,
      number=28, type=2, cpp_type=6, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='num_epochs', full_name='Experiment.num_epochs', index=23,
      number=29, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='num_examples_per_epoch', full_name='Experiment.num_examples_per_epoch', index=24,
      number=30, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='logging_frequency', full_name='Experiment.logging_frequency', index=25,
      number=31, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=174,
  serialized_end=895,
)
# Resolve cross-file message references and register the descriptor and the
# generated Experiment message class with the default symbol database.
_EXPERIMENT.fields_by_name['maskrcnn_config'].message_type = nvidia__tao__deploy_dot_cv_dot_mask__rcnn_dot_proto_dot_maskrcnn__config__pb2._MASKRCNNCONFIG
_EXPERIMENT.fields_by_name['data_config'].message_type = nvidia__tao__deploy_dot_cv_dot_mask__rcnn_dot_proto_dot_data__config__pb2._DATACONFIG
DESCRIPTOR.message_types_by_name['Experiment'] = _EXPERIMENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Experiment = _reflection.GeneratedProtocolMessageType('Experiment', (_message.Message,), dict(
  DESCRIPTOR = _EXPERIMENT,
  __module__ = 'nvidia_tao_deploy.cv.mask_rcnn.proto.experiment_pb2'
  # @@protoc_insertion_point(class_scope:Experiment)
  ))
_sym_db.RegisterMessage(Experiment)
# @@protoc_insertion_point(module_scope)
| tao_deploy-main | nvidia_tao_deploy/cv/mask_rcnn/proto/experiment_pb2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy Config Base Utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from google.protobuf.text_format import Merge as merge_text_proto
from nvidia_tao_deploy.cv.mask_rcnn.proto.experiment_pb2 import Experiment
def load_proto(config):
    """Load an MRCNN experiment spec file into an Experiment proto.

    Args:
        config (str): Path to a text-format (protobuf txt) experiment spec file.

    Returns:
        Experiment: The parsed experiment proto.

    Raises:
        IOError: If no spec file exists at the given path.
    """
    proto = Experiment()

    def _load_from_file(filename, pb2):
        # Merge the text-format spec into the given protobuf message in place.
        if not os.path.exists(filename):
            # Include the offending path so the user can fix it (the previous
            # message was an f-string with no placeholder and never showed it).
            raise IOError(f"Specfile not found at: {filename}")
        with open(filename, "r", encoding="utf-8") as f:
            merge_text_proto(f.read(), pb2)

    _load_from_file(config, proto)
    return proto
| tao_deploy-main | nvidia_tao_deploy/cv/mask_rcnn/proto/utils.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/mask_rcnn/proto/maskrcnn_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_deploy/cv/mask_rcnn/proto/maskrcnn_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n:nvidia_tao_deploy/cv/mask_rcnn/proto/maskrcnn_config.proto\"\x8c\x07\n\x0eMaskRCNNConfig\x12\x1b\n\x13rpn_box_loss_weight\x18\x01 \x01(\x02\x12!\n\x19\x66\x61st_rcnn_box_loss_weight\x18\x02 \x01(\x02\x12\x1e\n\x16mrcnn_weight_loss_mask\x18\x03 \x01(\x02\x12\x11\n\tfreeze_bn\x18\x04 \x01(\x08\x12\x18\n\x10\x62\x62ox_reg_weights\x18\x05 \x01(\t\x12\x15\n\raspect_ratios\x18\x06 \x01(\t\x12\x14\n\x0cgt_mask_size\x18\x07 \x01(\r\x12\x1c\n\x14rpn_positive_overlap\x18\x08 \x01(\x02\x12\x1c\n\x14rpn_negative_overlap\x18\t \x01(\x02\x12\x1d\n\x15rpn_batch_size_per_im\x18\n \x01(\r\x12\x17\n\x0frpn_fg_fraction\x18\x0b \x01(\x02\x12\x14\n\x0crpn_min_size\x18\x0c \x01(\x02\x12\x19\n\x11\x62\x61tch_size_per_im\x18\r \x01(\r\x12\x13\n\x0b\x66g_fraction\x18\x0e \x01(\x02\x12\x11\n\tfg_thresh\x18\x0f \x01(\x02\x12\x14\n\x0c\x62g_thresh_hi\x18\x10 \x01(\x02\x12\x14\n\x0c\x62g_thresh_lo\x18\x11 \x01(\x02\x12\x1e\n\x16\x66\x61st_rcnn_mlp_head_dim\x18\x12 \x01(\r\x12\x14\n\x0cinclude_mask\x18\x13 \x01(\x08\x12\x18\n\x10mrcnn_resolution\x18\x14 \x01(\r\x12\x1e\n\x16train_rpn_pre_nms_topn\x18\x15 \x01(\r\x12\x1f\n\x17train_rpn_post_nms_topn\x18\x16 \x01(\r\x12\x1f\n\x17train_rpn_nms_threshold\x18\x17 \x01(\x02\x12!\n\x19test_detections_per_image\x18\x18 \x01(\r\x12\x10\n\x08test_nms\x18\x19 \x01(\x02\x12\x1d\n\x15test_rpn_pre_nms_topn\x18\x1a \x01(\r\x12\x1e\n\x16test_rpn_post_nms_topn\x18\x1b \x01(\r\x12\x1b\n\x13test_rpn_nms_thresh\x18\x1c \x01(\x02\x12\x11\n\tmin_level\x18\x1d \x01(\r\x12\x11\n\tmax_level\x18\x1e \x01(\r\x12\x12\n\nnum_scales\x18\x1f \x01(\r\x12\x14\n\x0c\x61nchor_scale\x18 \x01(\r\x12\x0c\n\x04\x61rch\x18! \x01(\t\x12\x0f\n\x07nlayers\x18\" \x01(\r\x12\x15\n\rfreeze_blocks\x18# \x01(\tb\x06proto3')
)
_MASKRCNNCONFIG = _descriptor.Descriptor(
name='MaskRCNNConfig',
full_name='MaskRCNNConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rpn_box_loss_weight', full_name='MaskRCNNConfig.rpn_box_loss_weight', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fast_rcnn_box_loss_weight', full_name='MaskRCNNConfig.fast_rcnn_box_loss_weight', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mrcnn_weight_loss_mask', full_name='MaskRCNNConfig.mrcnn_weight_loss_mask', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freeze_bn', full_name='MaskRCNNConfig.freeze_bn', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bbox_reg_weights', full_name='MaskRCNNConfig.bbox_reg_weights', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='aspect_ratios', full_name='MaskRCNNConfig.aspect_ratios', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gt_mask_size', full_name='MaskRCNNConfig.gt_mask_size', index=6,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rpn_positive_overlap', full_name='MaskRCNNConfig.rpn_positive_overlap', index=7,
number=8, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rpn_negative_overlap', full_name='MaskRCNNConfig.rpn_negative_overlap', index=8,
number=9, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rpn_batch_size_per_im', full_name='MaskRCNNConfig.rpn_batch_size_per_im', index=9,
number=10, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rpn_fg_fraction', full_name='MaskRCNNConfig.rpn_fg_fraction', index=10,
number=11, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rpn_min_size', full_name='MaskRCNNConfig.rpn_min_size', index=11,
number=12, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batch_size_per_im', full_name='MaskRCNNConfig.batch_size_per_im', index=12,
number=13, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fg_fraction', full_name='MaskRCNNConfig.fg_fraction', index=13,
number=14, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fg_thresh', full_name='MaskRCNNConfig.fg_thresh', index=14,
number=15, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bg_thresh_hi', full_name='MaskRCNNConfig.bg_thresh_hi', index=15,
number=16, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bg_thresh_lo', full_name='MaskRCNNConfig.bg_thresh_lo', index=16,
number=17, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fast_rcnn_mlp_head_dim', full_name='MaskRCNNConfig.fast_rcnn_mlp_head_dim', index=17,
number=18, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='include_mask', full_name='MaskRCNNConfig.include_mask', index=18,
number=19, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mrcnn_resolution', full_name='MaskRCNNConfig.mrcnn_resolution', index=19,
number=20, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='train_rpn_pre_nms_topn', full_name='MaskRCNNConfig.train_rpn_pre_nms_topn', index=20,
number=21, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='train_rpn_post_nms_topn', full_name='MaskRCNNConfig.train_rpn_post_nms_topn', index=21,
number=22, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='train_rpn_nms_threshold', full_name='MaskRCNNConfig.train_rpn_nms_threshold', index=22,
number=23, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='test_detections_per_image', full_name='MaskRCNNConfig.test_detections_per_image', index=23,
number=24, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='test_nms', full_name='MaskRCNNConfig.test_nms', index=24,
number=25, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='test_rpn_pre_nms_topn', full_name='MaskRCNNConfig.test_rpn_pre_nms_topn', index=25,
number=26, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='test_rpn_post_nms_topn', full_name='MaskRCNNConfig.test_rpn_post_nms_topn', index=26,
number=27, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='test_rpn_nms_thresh', full_name='MaskRCNNConfig.test_rpn_nms_thresh', index=27,
number=28, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_level', full_name='MaskRCNNConfig.min_level', index=28,
number=29, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_level', full_name='MaskRCNNConfig.max_level', index=29,
number=30, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_scales', full_name='MaskRCNNConfig.num_scales', index=30,
number=31, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='anchor_scale', full_name='MaskRCNNConfig.anchor_scale', index=31,
number=32, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='arch', full_name='MaskRCNNConfig.arch', index=32,
number=33, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nlayers', full_name='MaskRCNNConfig.nlayers', index=33,
number=34, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freeze_blocks', full_name='MaskRCNNConfig.freeze_blocks', index=34,
number=35, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=63,
serialized_end=971,
)
DESCRIPTOR.message_types_by_name['MaskRCNNConfig'] = _MASKRCNNCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
MaskRCNNConfig = _reflection.GeneratedProtocolMessageType('MaskRCNNConfig', (_message.Message,), dict(
DESCRIPTOR = _MASKRCNNCONFIG,
__module__ = 'nvidia_tao_deploy.cv.mask_rcnn.proto.maskrcnn_config_pb2'
# @@protoc_insertion_point(class_scope:MaskRCNNConfig)
))
_sym_db.RegisterMessage(MaskRCNNConfig)
# @@protoc_insertion_point(module_scope)
| tao_deploy-main | nvidia_tao_deploy/cv/mask_rcnn/proto/maskrcnn_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/mask_rcnn/proto/data_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_deploy/cv/mask_rcnn/proto/data_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n6nvidia_tao_deploy/cv/mask_rcnn/proto/data_config.proto\"\xcb\x02\n\nDataConfig\x12\x12\n\nimage_size\x18\x01 \x01(\t\x12\x1a\n\x12\x61ugment_input_data\x18\x02 \x01(\x08\x12\x13\n\x0bnum_classes\x18\x03 \x01(\r\x12\"\n\x1askip_crowd_during_training\x18\x04 \x01(\x08\x12\x1d\n\x15training_file_pattern\x18\x06 \x01(\t\x12\x1f\n\x17validation_file_pattern\x18\x07 \x01(\t\x12\x15\n\rval_json_file\x18\x08 \x01(\t\x12\x14\n\x0c\x65val_samples\x18\t \x01(\r\x12\x1c\n\x14prefetch_buffer_size\x18\n \x01(\r\x12\x1b\n\x13shuffle_buffer_size\x18\x0b \x01(\r\x12\x11\n\tn_workers\x18\x0c \x01(\r\x12\x19\n\x11max_num_instances\x18\r \x01(\rb\x06proto3')
)
_DATACONFIG = _descriptor.Descriptor(
name='DataConfig',
full_name='DataConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='image_size', full_name='DataConfig.image_size', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='augment_input_data', full_name='DataConfig.augment_input_data', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_classes', full_name='DataConfig.num_classes', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='skip_crowd_during_training', full_name='DataConfig.skip_crowd_during_training', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='training_file_pattern', full_name='DataConfig.training_file_pattern', index=4,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='validation_file_pattern', full_name='DataConfig.validation_file_pattern', index=5,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='val_json_file', full_name='DataConfig.val_json_file', index=6,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eval_samples', full_name='DataConfig.eval_samples', index=7,
number=9, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='prefetch_buffer_size', full_name='DataConfig.prefetch_buffer_size', index=8,
number=10, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='shuffle_buffer_size', full_name='DataConfig.shuffle_buffer_size', index=9,
number=11, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='n_workers', full_name='DataConfig.n_workers', index=10,
number=12, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_num_instances', full_name='DataConfig.max_num_instances', index=11,
number=13, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=59,
serialized_end=390,
)
DESCRIPTOR.message_types_by_name['DataConfig'] = _DATACONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DataConfig = _reflection.GeneratedProtocolMessageType('DataConfig', (_message.Message,), dict(
DESCRIPTOR = _DATACONFIG,
__module__ = 'nvidia_tao_deploy.cv.mask_rcnn.proto.data_config_pb2'
# @@protoc_insertion_point(class_scope:DataConfig)
))
_sym_db.RegisterMessage(DataConfig)
# @@protoc_insertion_point(module_scope)
| tao_deploy-main | nvidia_tao_deploy/cv/mask_rcnn/proto/data_config_pb2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MRCNN convert etlt/onnx model to TRT engine."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import tempfile
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.mask_rcnn.engine_builder import MRCNNEngineBuilder
from nvidia_tao_deploy.utils.decoding import decode_model
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
DEFAULT_MAX_BATCH_SIZE = 1
DEFAULT_MIN_BATCH_SIZE = 1
DEFAULT_OPT_BATCH_SIZE = 1
@monitor_status(name='mask_rcnn', mode='gen_trt_engine')
def main(args):
    """Convert encrypted uff or onnx model to TRT engine.

    Args:
        args (Namespace): parsed command line arguments
            (see build_command_line_parser).
    """
    # decrypt etlt
    tmp_onnx_file, file_format = decode_model(args.model_path, args.key)

    # Build an engine only when a target engine path was given or int8
    # calibration requires one; plain decoding is otherwise sufficient.
    if args.engine_file is not None or args.data_type == 'int8':
        if args.engine_file is None:
            # No explicit path: write the engine to a throwaway temp file.
            engine_handle, temp_engine_path = tempfile.mkstemp()
            os.close(engine_handle)
            output_engine_path = temp_engine_path
        else:
            output_engine_path = args.engine_file

        builder = MRCNNEngineBuilder(verbose=args.verbose,
                                     workspace=args.max_workspace_size,
                                     min_batch_size=args.min_batch_size,
                                     opt_batch_size=args.opt_batch_size,
                                     max_batch_size=args.max_batch_size,
                                     strict_type_constraints=args.strict_type_constraints)
        builder.create_network(tmp_onnx_file, file_format)
        builder.create_engine(
            output_engine_path,
            args.data_type,
            calib_data_file=args.cal_data_file,
            calib_input=args.cal_image_dir,
            calib_cache=args.cal_cache_file,
            calib_num_images=args.batch_size * args.batches,
            calib_batch_size=args.batch_size)

    # Use the module logger (was logging.info, which bypasses the named logger).
    logger.info("Export finished successfully.")
def build_command_line_parser(parser=None):
    """Build the command line parser using argparse.

    Args:
        parser (subparser): Provided from the wrapper script to build a chained
            parser mechanism. A fresh ArgumentParser is created when None.

    Returns:
        parser: parser with all gen_trt_engine options registered.
    """
    if parser is None:
        parser = argparse.ArgumentParser(prog='gen_trt_engine', description='Generate TRT engine of MRCNN model.')
    # Model input options.
    parser.add_argument(
        '-m',
        '--model_path',
        type=str,
        required=True,
        help='Path to a MRCNN .etlt or .onnx model file.'
    )
    parser.add_argument(
        '-k',
        '--key',
        type=str,
        required=False,
        help='Key to save or load a .etlt model.'
    )
    parser.add_argument(
        "--data_type",
        type=str,
        default="fp32",
        help="Data type for the TensorRT export.",
        choices=["fp32", "fp16", "int8"])
    # INT8 calibration options (only consulted when --data_type int8).
    parser.add_argument(
        "--cal_image_dir",
        default="",
        type=str,
        help="Directory of images to run int8 calibration.")
    parser.add_argument(
        "--cal_data_file",
        default=None,
        type=str,
        help="Tensorfile to run calibration for int8 optimization.")
    parser.add_argument(
        '--cal_cache_file',
        default=None,
        type=str,
        help='Calibration cache file to write to.')
    parser.add_argument(
        "--engine_file",
        type=str,
        default=None,
        help="Path to the exported TRT engine.")
    # Batch-size profile for the TensorRT engine builder.
    parser.add_argument(
        "--max_batch_size",
        type=int,
        default=DEFAULT_MAX_BATCH_SIZE,
        help="Max batch size for TensorRT engine builder.")
    parser.add_argument(
        "--min_batch_size",
        type=int,
        default=DEFAULT_MIN_BATCH_SIZE,
        help="Min batch size for TensorRT engine builder.")
    parser.add_argument(
        "--opt_batch_size",
        type=int,
        default=DEFAULT_OPT_BATCH_SIZE,
        help="Opt batch size for TensorRT engine builder.")
    parser.add_argument(
        "--batch_size",
        type=int,
        default=1,
        help="Number of images per batch.")
    parser.add_argument(
        "--batches",
        type=int,
        default=10,
        help="Number of batches to calibrate over.")
    parser.add_argument(
        "--max_workspace_size",
        type=int,
        default=2,
        help="Max memory workspace size to allow in Gb for TensorRT engine builder (default: 2).")
    parser.add_argument(
        "-s",
        "--strict_type_constraints",
        action="store_true",
        default=False,
        help="A Boolean flag indicating whether to apply the \
              TensorRT strict type constraints when building the TensorRT engine.")
    parser.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        default=False,
        help="Verbosity of the logger.")
    parser.add_argument(
        '-r',
        '--results_dir',
        type=str,
        required=True,
        default=None,
        help='Output directory where the log is saved.'
    )
    return parser
def parse_command_line_arguments(args=None):
    """Parse the gen_trt_engine command line arguments.

    Args:
        args (list): argv-style token list; None means use sys.argv.

    Returns:
        argparse.Namespace: parsed arguments.
    """
    return build_command_line_parser().parse_args(args)
if __name__ == '__main__':
args = parse_command_line_arguments()
main(args)
| tao_deploy-main | nvidia_tao_deploy/cv/mask_rcnn/scripts/gen_trt_engine.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy Mask RCNN scripts module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_deploy-main | nvidia_tao_deploy/cv/mask_rcnn/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Standalone TensorRT inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
from PIL import Image
import logging
import json
from tqdm.auto import tqdm
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.mask_rcnn.inferencer import MRCNNInferencer
from nvidia_tao_deploy.cv.mask_rcnn.proto.utils import load_proto
from nvidia_tao_deploy.utils.image_batcher import ImageBatcher
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
def resize_pad(image, model_width, model_height, pad_color=(0, 0, 0)):
    """Resize and Pad.

    A subroutine to implement padding and resizing. This will resize the image to fit
    fully within the input size, and pads the remaining bottom-right portions with
    the value provided.

    Args:
        image (PIL.Image): The PIL image object
        model_width (int): target (network input) width.
        model_height (int): target (network input) height.
        pad_color (list): The RGB values to use for the padded area. Default: Black/Zeros.

    Returns:
        pad (PIL.Image): The PIL image object already padded and cropped,
        scale (float): the resize scale used,
        padded (tuple): (width, height) of the padded bottom-right region in pixels.
    """
    width, height = image.size
    width_scale = width / model_width
    height_scale = height / model_height
    # Uniform scale that fits the whole image inside (model_width, model_height).
    scale = 1.0 / max(width_scale, height_scale)
    image = image.resize(
        (round(width * scale), round(height * scale)),
        resample=Image.BILINEAR)
    # Fill the canvas with pad_color, then paste the resized image at the
    # top-left; the remaining bottom/right strip keeps the pad color.
    pad = Image.new("RGB", (model_width, model_height))
    pad.paste(pad_color, [0, 0, model_width, model_height])
    pad.paste(image)
    padded = (abs(round(width * scale) - model_width), abs(round(height * scale) - model_height))
    return pad, scale, padded
def get_label_dict(label_txt):
    """Build a class-id -> label-name mapping from a newline-separated file.

    Class ids start at 1 in file order; id -1 is reserved for "background".
    """
    label_map = {}
    with open(label_txt, 'r', encoding="utf-8") as label_file:
        for class_id, line in enumerate(label_file, start=1):
            label_map[class_id] = line.strip()
    label_map[-1] = "background"
    return label_map
@monitor_status(name='mask_rcnn', mode='inference')
def main(args):
    """MRCNN TRT inference.

    Runs the TensorRT engine over a directory of images, saves annotated
    images under <results_dir>/images_annotated and per-image JSON labels
    under <results_dir>/labels.

    Args:
        args (Namespace): parsed command line arguments.

    Raises:
        FileNotFoundError: if the class map file does not exist.
    """
    # Load from proto-based spec file
    es = load_proto(args.experiment_spec)
    # Fall back to MaskRCNN defaults when the spec leaves these unset (0).
    mask_size = es.maskrcnn_config.mrcnn_resolution if es.maskrcnn_config.mrcnn_resolution else 28
    nms_size = es.maskrcnn_config.test_detections_per_image if es.maskrcnn_config.test_detections_per_image else 100

    trt_infer = MRCNNInferencer(args.model_path,
                                nms_size=nms_size,
                                n_classes=es.data_config.num_classes,
                                mask_size=mask_size)

    # Inference may not have labels. Hence, use image batcher
    batch_size = trt_infer.max_batch_size
    batcher = ImageBatcher(args.image_dir,
                           (batch_size,) + trt_infer._input_shape,
                           trt_infer.inputs[0].host.dtype,
                           preprocessor="MRCNN")

    # Create results directories
    if args.results_dir is None:
        results_dir = os.path.dirname(args.model_path)
    else:
        results_dir = args.results_dir
    os.makedirs(results_dir, exist_ok=True)
    output_annotate_root = os.path.join(results_dir, "images_annotated")
    output_label_root = os.path.join(results_dir, "labels")
    os.makedirs(output_annotate_root, exist_ok=True)
    os.makedirs(output_label_root, exist_ok=True)

    if not os.path.exists(args.class_map):
        raise FileNotFoundError(f"Class map is required for inference! {args.class_map} does not exist.")
    inv_classes = get_label_dict(args.class_map)

    for batch, img_paths, scales in tqdm(batcher.get_batch(), total=batcher.num_batches, desc="Producing predictions"):
        detections = trt_infer.infer(batch, scales)
        for idx, img_path in enumerate(img_paths):
            # Load Image
            img = Image.open(img_path)
            orig_width, orig_height = img.size
            img, sc, padding = resize_pad(img, trt_infer.width, trt_infer.height)
            # Map detection boxes back to the pre-resize coordinate space.
            detections['detection_boxes'][idx] /= sc
            bbox_img, label_strings = trt_infer.draw_bbox_and_segm(img,
                                                                   detections['detection_classes'][idx],
                                                                   detections['detection_scores'][idx],
                                                                   detections['detection_boxes'][idx],
                                                                   detections['detection_masks'][idx],
                                                                   inv_classes,
                                                                   args.threshold)
            # Crop out padded region and resize to original image
            bbox_img = bbox_img.crop((0, 0, trt_infer.width - padding[0], trt_infer.height - padding[1]))
            bbox_img = bbox_img.resize((orig_width, orig_height))
            img_filename = os.path.basename(img_path)
            bbox_img.save(os.path.join(output_annotate_root, img_filename))

            # Store labels
            filename, _ = os.path.splitext(img_filename)
            label_file_name = os.path.join(output_label_root, filename + ".json")
            # Add image path in label dump (direct iteration instead of
            # index loop over range(len(...))).
            for label in label_strings:
                label['image_id'] = img_path
            with open(label_file_name, "w", encoding="utf-8") as f:
                json.dump(label_strings, f, indent=4, sort_keys=True)
    # Use the module logger (was logging.info, which bypasses the named logger).
    logger.info("Finished inference.")
def build_command_line_parser(parser=None):
    """Build the command line parser using argparse.

    Args:
        parser (subparser): Provided from the wrapper script to build a chained
            parser mechanism.

    Returns:
        parser
    """
    if parser is None:
        parser = argparse.ArgumentParser(prog='infer', description='Inference with a MRCNN TRT model.')
    # Declarative (flags, options) table; registration order preserves the
    # original --help layout.
    cli_options = [
        (('-i', '--image_dir'),
         {'type': str, 'required': False, 'default': None,
          'help': 'Input directory of images'}),
        (('-e', '--experiment_spec'),
         {'type': str, 'required': True,
          'help': 'Path to the experiment spec file.'}),
        (('-m', '--model_path'),
         {'type': str, 'required': True,
          'help': 'Path to the MRCNN TensorRT engine.'}),
        (('-r', '--results_dir'),
         {'type': str, 'required': True, 'default': None,
          'help': 'Output directory where the log is saved.'}),
        (('-c', '--class_map'),
         {'type': str, 'default': None, 'required': True,
          'help': 'The path to the class label file.'}),
        (('-t', '--threshold'),
         {'type': float, 'default': 0.6,
          'help': 'Confidence threshold for inference.'}),
        (('--include_mask',),
         {'action': 'store_true', 'required': False, 'default': None,
          'help': argparse.SUPPRESS}),
    ]
    for flags, options in cli_options:
        parser.add_argument(*flags, **options)
    return parser
def parse_command_line_arguments(args=None):
    """Simple function to parse command line arguments.

    Args:
        args (list): argv-style token list; None means use sys.argv.

    Returns:
        argparse.Namespace: parsed arguments.
    """
    # Bug fix: `args` is the argv token list, not a parser; passing it as the
    # `parser` argument of build_command_line_parser crashed whenever a
    # non-None list was supplied.
    parser = build_command_line_parser()
    return parser.parse_args(args)
if __name__ == '__main__':
args = parse_command_line_arguments()
main(args)
| tao_deploy-main | nvidia_tao_deploy/cv/mask_rcnn/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Standalone TensorRT inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import operator
import copy
import logging
import json
import six
import numpy as np
from tqdm.auto import tqdm
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.mask_rcnn.dataloader import MRCNNCOCOLoader
from nvidia_tao_deploy.cv.mask_rcnn.inferencer import MRCNNInferencer
from nvidia_tao_deploy.cv.mask_rcnn.proto.utils import load_proto
from nvidia_tao_deploy.metrics.coco_metric import EvaluationMetric
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
@monitor_status(name='mask_rcnn', mode='evaluation')
def main(args):
    """MRCNN TRT evaluation.

    Runs the TensorRT engine over the COCO validation set described by the
    experiment spec, accumulates detections, logs COCO metrics and writes
    them to <results_dir>/results.json.

    Args:
        args (Namespace): parsed command line arguments.
    """
    # Load from proto-based spec file
    es = load_proto(args.experiment_spec)
    val_json_file = es.data_config.val_json_file
    # Fall back to MaskRCNN defaults when the spec leaves these unset (0).
    mask_size = es.maskrcnn_config.mrcnn_resolution if es.maskrcnn_config.mrcnn_resolution else 28
    nms_size = es.maskrcnn_config.test_detections_per_image if es.maskrcnn_config.test_detections_per_image else 100
    eval_samples = es.data_config.eval_samples if es.data_config.eval_samples else 0

    eval_metric = EvaluationMetric(val_json_file,
                                   include_mask=True)  # Only True is supported
    trt_infer = MRCNNInferencer(args.model_path,
                                nms_size=nms_size,
                                n_classes=es.data_config.num_classes,
                                mask_size=mask_size)
    dl = MRCNNCOCOLoader(
        val_json_file,
        batch_size=trt_infer.max_batch_size,
        data_format="channels_first",
        shape=[trt_infer.max_batch_size] + list(trt_infer._input_shape),
        dtype=trt_infer.inputs[0].host.dtype,
        image_dir=args.image_dir,
        eval_samples=eval_samples)

    predictions = {
        'detection_scores': [],
        'detection_boxes': [],
        'detection_classes': [],
        'detection_masks': [],
        'source_id': [],
        'image_info': [],
        'num_detections': []
    }

    def evaluation_preds(preds):
        # Essential to avoid modifying the source dict
        _preds = copy.deepcopy(preds)
        # Plain py3 iteration instead of six.iteritems (py2 compatibility shim).
        for k in _preds:
            _preds[k] = np.concatenate(_preds[k], axis=0)
        eval_results = eval_metric.predict_metric_fn(_preds)
        return eval_results

    for imgs, scale, source_id, labels in tqdm(dl, total=len(dl), desc="Producing predictions"):
        image = np.array(imgs)
        # image_info rows: [h, w, scale, orig_h, orig_w]-style entries taken
        # from the loader labels -- confirm layout against MRCNNCOCOLoader.
        image_info = []
        for i, label in enumerate(labels):
            image_info.append([label[-1][0], label[-1][1], scale[i], label[-1][2], label[-1][3]])
        image_info = np.array(image_info)
        detections = trt_infer.infer(image, scale)
        predictions['detection_classes'].append(detections['detection_classes'])
        predictions['detection_scores'].append(detections['detection_scores'])
        predictions['detection_boxes'].append(detections['detection_boxes'])
        predictions['detection_masks'].append(detections['detection_masks'])
        predictions['num_detections'].append(detections['num_detections'])
        predictions['image_info'].append(image_info)
        predictions['source_id'].append(source_id)

    eval_results = evaluation_preds(preds=predictions)
    for key, value in sorted(eval_results.items(), key=operator.itemgetter(0)):
        eval_results[key] = float(value)
        # Use the module logger (was logging.info, which bypasses the named logger).
        logger.info("%s: %.9f", key, value)

    if args.results_dir is None:
        results_dir = os.path.dirname(args.model_path)
    else:
        results_dir = args.results_dir
    # Keep parity with inference.py: ensure the output directory exists.
    os.makedirs(results_dir, exist_ok=True)
    with open(os.path.join(results_dir, "results.json"), "w", encoding="utf-8") as f:
        json.dump(eval_results, f)
    logger.info("Finished evaluation.")
def build_command_line_parser(parser=None):
    """Build the command line parser using argparse.

    Args:
        parser (subparser): Provided from the wrapper script to build a chained
            parser mechanism.

    Returns:
        parser
    """
    if parser is None:
        parser = argparse.ArgumentParser(prog='eval', description='Evaluate with a MRCNN TRT model.')
    # Declarative (flags, options) table; registration order preserves the
    # original --help layout.
    cli_options = [
        (('-i', '--image_dir'),
         {'type': str, 'required': False, 'default': None,
          'help': 'Input directory of images'}),
        (('-e', '--experiment_spec'),
         {'type': str, 'required': True,
          'help': 'Path to the experiment spec file.'}),
        (('-m', '--model_path'),
         {'type': str, 'required': True,
          'help': 'Path to the MRCNN TensorRT engine.'}),
        (('-r', '--results_dir'),
         {'type': str, 'required': True, 'default': None,
          'help': 'Output directory where the log is saved.'}),
        (('-b', '--batch_size'),
         {'type': int, 'required': False, 'default': 1,
          'help': 'Batch size.'}),
    ]
    for flags, options in cli_options:
        parser.add_argument(*flags, **options)
    return parser
def parse_command_line_arguments(args=None):
    """Simple function to parse command line arguments.

    Args:
        args (list): argv-style token list; None means use sys.argv.

    Returns:
        argparse.Namespace: parsed arguments.
    """
    # Bug fix: `args` is the argv token list, not a parser; passing it as the
    # `parser` argument of build_command_line_parser crashed whenever a
    # non-None list was supplied.
    parser = build_command_line_parser()
    return parser.parse_args(args)
if __name__ == '__main__':
args = parse_command_line_arguments()
main(args)
| tao_deploy-main | nvidia_tao_deploy/cv/mask_rcnn/scripts/evaluate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entrypoint module for mask rcnn."""
| tao_deploy-main | nvidia_tao_deploy/cv/mask_rcnn/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy command line wrapper to invoke CLI scripts."""
import sys
from nvidia_tao_deploy.cv.common.entrypoint.entrypoint_proto import launch_job
import nvidia_tao_deploy.cv.mask_rcnn.scripts
def main():
    """Function to launch the job.

    Forwards the command line tokens (sys.argv minus the program name) to the
    proto-based job launcher together with the mask_rcnn scripts package.
    """
    launch_job(nvidia_tao_deploy.cv.mask_rcnn.scripts, "mask_rcnn", sys.argv[1:])
if __name__ == "__main__":
main()
| tao_deploy-main | nvidia_tao_deploy/cv/mask_rcnn/entrypoint/mask_rcnn.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility class for performing TensorRT image inference."""
import os
import cv2
from PIL import Image
import numpy as np
import tensorrt as trt
from nvidia_tao_deploy.cv.unet.utils import get_color_id, overlay_seg_image
from nvidia_tao_deploy.inferencer.trt_inferencer import TRTInferencer
from nvidia_tao_deploy.inferencer.utils import allocate_buffers, do_inference
def trt_output_process_fn(y_encoded, model_output_width, model_output_height, activation="softmax"):
    """Reshape flat TRT output into per-image (H, W) int32 masks.

    Args:
        y_encoded (list): raw TRT outputs; y_encoded[0] holds one flattened
            prediction map per batch element.
        model_output_width (int): width of the model output map.
        model_output_height (int): height of the model output map.
        activation (str): for "sigmoid" the values are binarized at 0.5;
            otherwise they are kept as-is and truncated to int32.

    Returns:
        np.ndarray: int32 array of shape (batch, height, width).
    """
    predictions_batch = []
    for idx in range(y_encoded[0].shape[0]):
        # Reshape straight to (H, W); the former (H, W, 1) + squeeze pair
        # was redundant.
        pred = np.reshape(y_encoded[0][idx, ...], (model_output_height,
                                                   model_output_width))
        if activation == "sigmoid":
            # Binarize sigmoid probabilities at the 0.5 threshold.
            pred = np.where(pred > 0.5, 1, 0)
        pred = pred.astype(np.int32)
        predictions_batch.append(pred)
    return np.array(predictions_batch)
class UNetInferencer(TRTInferencer):
    """Manages TensorRT objects for model inference."""

    def __init__(self, engine_path, input_shape=None, batch_size=None, data_format="channel_first", activation="softmax"):
        """Initializes TensorRT objects needed for model inference.

        Args:
            engine_path (str): path where TensorRT engine should be stored
            input_shape (tuple): (batch, channel, height, width) for dynamic shape engine
            batch_size (int): batch size for dynamic shape engine
            data_format (str): either channel_first or channel_last
            activation (str): output activation ("softmax" or "sigmoid"),
                forwarded to the output post-processing.
        """
        # Load TRT engine
        super().__init__(engine_path)
        self.max_batch_size = self.engine.max_batch_size
        self.execute_v2 = False
        self.activation = activation

        # Execution context is needed for inference
        self.context = None

        # Allocate memory for multiple usage [e.g. multiple batch inference]
        # Take the (C, H, W) tail of the input binding shape.
        self._input_shape = []
        for binding in range(self.engine.num_bindings):
            if self.engine.binding_is_input(binding):
                self._input_shape = self.engine.get_binding_shape(binding)[-3:]
        assert len(self._input_shape) == 3, "Engine doesn't have valid input dimensions"
        if data_format == "channel_first":
            self.height = self._input_shape[1]
            self.width = self._input_shape[2]
        else:
            self.height = self._input_shape[0]
            self.width = self._input_shape[1]

        # set binding_shape for dynamic input
        if (input_shape is not None) or (batch_size is not None):
            self.context = self.engine.create_execution_context()
            if input_shape is not None:
                self.context.set_binding_shape(0, input_shape)
                self.max_batch_size = input_shape[0]
            else:
                self.context.set_binding_shape(0, [batch_size] + list(self._input_shape))
                self.max_batch_size = batch_size
            self.execute_v2 = True

        # This allocates memory for network inputs/outputs on both CPU and GPU
        self.inputs, self.outputs, self.bindings, self.stream = allocate_buffers(self.engine,
                                                                                 self.context)
        if self.context is None:
            self.context = self.engine.create_execution_context()

        input_volume = trt.volume(self._input_shape)
        # Staging buffer reused across infer() calls.
        self.numpy_array = np.zeros((self.max_batch_size, input_volume))

    def infer(self, imgs):
        """Infers model on batch of same sized images resized to fit the model.

        Args:
            imgs (np.ndarray): preprocessed image batch; its first dimension
                (actual batch size) must not exceed the engine max batch size.

        Returns:
            np.ndarray: int32 segmentation masks of shape (batch, H, W).

        Raises:
            ValueError: if the batch is larger than the engine max batch size.
        """
        # Verify if the supplied batch size is not too big
        max_batch_size = self.max_batch_size
        actual_batch_size = len(imgs)
        if actual_batch_size > max_batch_size:
            raise ValueError(f"image_paths list bigger ({actual_batch_size}) than \
                               engine max batch size ({max_batch_size})")
        self.numpy_array[:actual_batch_size] = imgs.reshape(actual_batch_size, -1)

        # ...copy them into appropriate place into memory...
        # (self.inputs was returned earlier by allocate_buffers())
        np.copyto(self.inputs[0].host, self.numpy_array.ravel())

        # ...fetch model outputs...
        results = do_inference(
            self.context, bindings=self.bindings, inputs=self.inputs,
            outputs=self.outputs, stream=self.stream,
            batch_size=max_batch_size,
            execute_v2=self.execute_v2)

        # ...and return results up to the actual batch size.
        y_pred = [i.reshape(max_batch_size, -1)[:actual_batch_size] for i in results]

        # Process TRT outputs to proper format
        return trt_output_process_fn(y_pred, self.width, self.height, self.activation)

    def __del__(self):
        """Clear things up on object deletion."""
        # Clear session and buffer
        if self.trt_runtime:
            del self.trt_runtime
        if self.context:
            del self.context
        if self.engine:
            del self.engine
        if self.stream:
            del self.stream
        # Loop through inputs and free inputs.
        for inp in self.inputs:
            inp.device.free()
        # Loop through outputs and free them.
        for out in self.outputs:
            out.device.free()

    def visualize_masks(self, img_paths, predictions, out_dir, num_classes=2,
                        input_image_type="rgb", resize_padding=False, resize_method='BILINEAR'):
        """Store overlaid image and predictions to png format.

        Args:
            img_paths: The input image names.
            predictions: Predicted masks numpy arrays.
            out_dir: Output dir where the visualization is saved.
            num_classes: Number of classes used.
            input_image_type: The input type of image (color/ grayscale).
            resize_padding: If padding was used or not.
            resize_method: Resize method used (Default: BILINEAR).
        """
        colors = get_color_id(num_classes)
        vis_dir = os.path.join(out_dir, "vis_overlay")
        label_dir = os.path.join(out_dir, "mask_labels")
        os.makedirs(vis_dir, exist_ok=True)
        os.makedirs(label_dir, exist_ok=True)
        for pred, img_path in zip(predictions, img_paths):
            img_file_name = os.path.basename(img_path)
            # Fix: the colorized segmentation map and cv2.imread were formerly
            # computed unconditionally and then recomputed in the else branch;
            # the grayscale path never used them, so they live only in `else`.
            if input_image_type == "grayscale":
                # Scale the binary mask to 0/255 and save it directly.
                pred = pred.astype(np.uint8) * 255
                fused_img = Image.fromarray(pred).resize(size=(self.width, self.height),
                                                         resample=Image.BILINEAR)
                # Save overlaid image
                fused_img.save(os.path.join(vis_dir, img_file_name))
            else:
                # Paint each class id with its RGB color, then overlay on the
                # original image.
                segmented_img = np.zeros((self.height, self.width, 3))
                for c in range(len(colors)):
                    seg_arr_c = pred[:, :] == c
                    segmented_img[:, :, 0] += ((seg_arr_c) * (colors[c][0])).astype('uint8')
                    segmented_img[:, :, 1] += ((seg_arr_c) * (colors[c][1])).astype('uint8')
                    segmented_img[:, :, 2] += ((seg_arr_c) * (colors[c][2])).astype('uint8')
                orig_image = cv2.imread(img_path)
                fused_img = overlay_seg_image(orig_image, segmented_img, resize_padding,
                                              resize_method)
                # Save overlaid image
                cv2.imwrite(os.path.join(vis_dir, img_file_name), fused_img)
            mask_name = f"{os.path.splitext(img_file_name)[0]}.png"
            # Save predictions
            cv2.imwrite(os.path.join(label_dir, mask_name), pred)
| tao_deploy-main | nvidia_tao_deploy/cv/unet/inferencer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""UNet TensorRT engine builder."""
import logging
import os
import random
from six.moves import xrange
import sys
import onnx
from tqdm import tqdm
import tensorrt as trt
from nvidia_tao_deploy.cv.common.constants import VALID_IMAGE_EXTENSIONS
from nvidia_tao_deploy.engine.builder import EngineBuilder
from nvidia_tao_deploy.engine.tensorfile import TensorFile
from nvidia_tao_deploy.engine.tensorfile_calibrator import TensorfileCalibrator
from nvidia_tao_deploy.engine.utils import prepare_chunk
# Configure root logging once at import time so every logger in this module
# emits timestamped, level-tagged messages in the TAO Toolkit format.
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
                    level="INFO")
# Module-level logger used throughout this file.
logger = logging.getLogger(__name__)
class UNetEngineBuilder(EngineBuilder):
    """Parses an UFF/ONNX graph and builds a TensorRT engine from it."""

    def __init__(
        self,
        image_list=None,
        data_format="channels_first",
        **kwargs
    ):
        """Init.

        Args:
            image_list (list): list of training image lists from the experiment spec.
                Used as the calibration image source when no calibration image
                directory is supplied.
            data_format (str): data_format (e.g. "channels_first").
        """
        super().__init__(**kwargs)
        self._data_format = data_format
        self.training_image_list = image_list

    def get_onnx_input_dims(self, model_path):
        """Get input dimension of ONNX model.

        Args:
            model_path (str): path to the ONNX model file.

        Returns:
            list: all ``dim_value`` entries (batch dimension included) of the
                last graph input. UNet models are expected to have exactly one
                input, so "last" and "only" coincide in practice.
        """
        onnx_model = onnx.load(model_path)
        onnx_inputs = onnx_model.graph.input
        logger.info('List inputs:')
        dims = []
        for i, model_input in enumerate(onnx_inputs):
            # Compute the dim list once per input instead of three times.
            dims = [d.dim_value for d in model_input.type.tensor_type.shape.dim]
            logger.info('Input %s -> %s.', i, model_input.name)
            logger.info('%s.', dims[1:])
            logger.info('%s.', dims[0])
        # The original read the loop variable after the loop (NameError on an
        # input-less graph); returning the accumulated `dims` is equivalent for
        # any graph with at least one input and safe otherwise.
        return dims

    def create_network(self, model_path, file_format="onnx"):
        """Parse the UFF/ONNX graph and create the corresponding TensorRT network definition.

        Args:
            model_path (str): The path to the UFF/ONNX graph to load.
            file_format (str): The file format of the decrypted etlt file (default: onnx).

        Raises:
            NotImplementedError: if ``file_format`` is not "onnx" — UFF parsing
                is not supported for UNet.
        """
        if file_format == "onnx":
            logger.info("Parsing ONNX model")
            self._input_dims = self.get_onnx_input_dims(model_path)
            # First dim is the (possibly dynamic, i.e. <= 0) batch size.
            self.batch_size = self._input_dims[0]

            network_flags = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
            self.network = self.builder.create_network(network_flags)
            self.parser = trt.OnnxParser(self.network, self.trt_logger)

            model_path = os.path.realpath(model_path)
            with open(model_path, "rb") as f:
                if not self.parser.parse(f.read()):
                    logger.error("Failed to load ONNX file: %s", model_path)
                    for error in range(self.parser.num_errors):
                        logger.error(self.parser.get_error(error))
                    sys.exit(1)

            inputs = [self.network.get_input(i) for i in range(self.network.num_inputs)]
            outputs = [self.network.get_output(i) for i in range(self.network.num_outputs)]

            logger.info("Network Description")
            # Renamed loop variables so the `input` builtin is not shadowed.
            for net_input in inputs:
                logger.info("Input '%s' with shape %s and dtype %s",
                            net_input.name, net_input.shape, net_input.dtype)
            for net_output in outputs:
                logger.info("Output '%s' with shape %s and dtype %s",
                            net_output.name, net_output.shape, net_output.dtype)

            if self.batch_size <= 0:  # dynamic batch size
                logger.info("dynamic batch size handling")
                opt_profile = self.builder.create_optimization_profile()
                model_input = self.network.get_input(0)
                input_shape = model_input.shape
                input_name = model_input.name
                real_shape_min = (self.min_batch_size, input_shape[1],
                                  input_shape[2], input_shape[3])
                real_shape_opt = (self.opt_batch_size, input_shape[1],
                                  input_shape[2], input_shape[3])
                real_shape_max = (self.max_batch_size, input_shape[1],
                                  input_shape[2], input_shape[3])
                opt_profile.set_shape(input=input_name,
                                      min=real_shape_min,
                                      opt=real_shape_opt,
                                      max=real_shape_max)
                self.config.add_optimization_profile(opt_profile)
        else:
            logger.info("Parsing UFF model")
            # BUG FIX: the original message said "UFF for YOLO_v3 is not
            # supported" — a copy-paste from the YOLO builder; this builder
            # is for UNet.
            raise NotImplementedError("UFF for UNet is not supported")

    def set_calibrator(self,
                       inputs=None,
                       calib_cache=None,
                       calib_input=None,
                       calib_num_images=5000,
                       calib_batch_size=8,
                       calib_data_file=None,
                       image_mean=None):
        """Simple function to set an Tensorfile based int8 calibrator.

        Args:
            inputs: unused; kept for signature compatibility with the base class.
            calib_data_file: Path to the TensorFile. If the tensorfile doesn't exist
                at this path, then one is created with either n_batches
                of random tensors, images from the file in calib_input of dimensions
                (batch_size,) + (input_dims).
            calib_input: The path to a directory holding the calibration images.
            calib_cache: The path where to write the calibration cache to,
                or if it already exists, load it from.
            calib_num_images: The maximum number of images to use for calibration.
            calib_batch_size: The batch size to use for the calibration process.
            image_mean: Image mean per channel.

        Returns:
            No explicit returns.
        """
        logger.info("Calibrating using TensorfileCalibrator")

        n_batches = calib_num_images // calib_batch_size
        if not os.path.exists(calib_data_file):
            # _input_dims is (batch, C, H, W); the tensorfile needs only CHW.
            self.generate_tensor_file(calib_data_file,
                                      calib_input,
                                      self._input_dims[1:],
                                      n_batches=n_batches,
                                      batch_size=calib_batch_size,
                                      image_mean=image_mean)
        self.config.int8_calibrator = TensorfileCalibrator(calib_data_file,
                                                           calib_cache,
                                                           n_batches,
                                                           calib_batch_size)

    def generate_tensor_file(self, data_file_name,
                             calibration_images_dir,
                             input_dims, n_batches=10,
                             batch_size=1, image_mean=None):
        """Generate calibration Tensorfile for int8 calibrator.

        This function generates a calibration tensorfile from a directory of images, or dumps
        n_batches of random numpy arrays of shape (batch_size,) + (input_dims).

        Args:
            data_file_name (str): Path to the output tensorfile to be saved.
            calibration_images_dir (str): Path to the images to generate a tensorfile from.
            input_dims (list): Input shape in CHW order.
            n_batches (int): Number of batches to be saved.
            batch_size (int): Number of images per batch.
            image_mean (list): Image mean per channel.

        Raises:
            ValueError: if fewer images are available than n_batches * batch_size.

        Returns:
            No explicit returns.
        """
        # Preparing the list of images to be saved.
        num_images = n_batches * batch_size
        if os.path.exists(calibration_images_dir):
            image_list = []
            for image in os.listdir(calibration_images_dir):
                if image.lower().endswith(VALID_IMAGE_EXTENSIONS):
                    image_list.append(os.path.join(calibration_images_dir, image))
        else:
            logger.info("Calibration image directory is not specified. Using training images from experiment spec!")
            if self.training_image_list[0].endswith(".txt"):
                # Each entry is a text file listing one image path per line.
                image_list = []
                for imgs in self.training_image_list:
                    # Read image files
                    with open(imgs, encoding="utf-8") as f:
                        x_set = f.readlines()
                    for f_im in x_set:
                        # Ensuring all image files are present
                        f_im = f_im.strip()
                        if f_im.lower().endswith(VALID_IMAGE_EXTENSIONS):
                            image_list.append(f_im)
            else:
                # Otherwise the first entry is itself an image directory.
                image_list = [os.path.join(self.training_image_list[0], f) for f in os.listdir(self.training_image_list[0])
                              if f.lower().endswith(VALID_IMAGE_EXTENSIONS)]

        if len(image_list) < num_images:
            raise ValueError('Not enough number of images provided:'
                             f' {len(image_list)} < {num_images}')
        # Sample image indices without replacement so each calibration image
        # is used at most once.
        image_idx = random.sample(xrange(len(image_list)), num_images)
        self.set_data_preprocessing_parameters(input_dims, image_mean)
        # Writing out processed dump.
        with TensorFile(data_file_name, 'w') as f:
            for chunk in tqdm(image_idx[x:x + batch_size] for x in xrange(0, len(image_idx),
                                                                          batch_size)):
                dump_data = prepare_chunk(chunk, image_list,
                                          image_width=input_dims[2],
                                          image_height=input_dims[1],
                                          channels=input_dims[0],
                                          batch_size=batch_size,
                                          **self.preprocessing_arguments)
                f.write(dump_data)
        # BUG FIX: removed the original trailing `f.closed` statement — it was
        # a no-op attribute access; the `with` block already closes the file.

    def set_data_preprocessing_parameters(self, input_dims, image_mean=None):
        """Set data pre-processing parameters for the int8 calibration.

        Args:
            input_dims (list): Input shape in CHW order; only the channel
                count (index 0) is used here.
            image_mean (list): Optional per-channel mean; defaults to 127.5
                per channel for RGB and 127 for grayscale.

        Raises:
            NotImplementedError: if the channel count is neither 1 nor 3.
        """
        num_channels = input_dims[0]
        if num_channels == 3:
            if not image_mean:
                means = [127.5, 127.5, 127.5]
            else:
                assert len(image_mean) == 3, "Image mean should have 3 values for RGB inputs."
                means = image_mean
        elif num_channels == 1:
            if not image_mean:
                means = [127]
            else:
                assert len(image_mean) == 1, "Image mean should have 1 value for grayscale inputs."
                means = image_mean
        else:
            raise NotImplementedError(
                f"Invalid number of dimensions {num_channels}.")
        # flip_channel=True converts BGR (cv2 default) to the RGB order the
        # network was trained with — TODO confirm against prepare_chunk.
        self.preprocessing_arguments = {"scale": 1.0 / 127.5,
                                        "means": means,
                                        "flip_channel": True}
| tao_deploy-main | nvidia_tao_deploy/cv/unet/engine_builder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy UNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_deploy-main | nvidia_tao_deploy/cv/unet/__init__.py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.