python_code (stringlengths 0–679k) | repo_name (stringlengths 9–41) | file_path (stringlengths 6–149)
---|---|---|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA MakeNet root module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/makenet/__init__.py |
"""makenet docker entrypoint."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from maglev_sdk.docker_container.entrypoint import main
if __name__ == '__main__':
    main('makenet', 'nvidia_tao_tf1/cv/makenet/scripts')
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/makenet/docker/makenet.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/makenet/proto/optimizer_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/makenet/proto/optimizer_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n6nvidia_tao_tf1/cv/makenet/proto/optimizer_config.proto\"S\n\x12SgdOptimizerConfig\x12\n\n\x02lr\x18\x01 \x01(\x02\x12\r\n\x05\x64\x65\x63\x61y\x18\x02 \x01(\x02\x12\x10\n\x08momentum\x18\x03 \x01(\x02\x12\x10\n\x08nesterov\x18\x04 \x01(\x08\"a\n\x13\x41\x64\x61mOptimizerConfig\x12\n\n\x02lr\x18\x01 \x01(\x02\x12\x0e\n\x06\x62\x65ta_1\x18\x02 \x01(\x02\x12\x0e\n\x06\x62\x65ta_2\x18\x03 \x01(\x02\x12\x0f\n\x07\x65psilon\x18\x04 \x01(\x02\x12\r\n\x05\x64\x65\x63\x61y\x18\x05 \x01(\x02\"Q\n\x16RmspropOptimizerConfig\x12\n\n\x02lr\x18\x01 \x01(\x02\x12\x0b\n\x03rho\x18\x02 \x01(\x02\x12\x0f\n\x07\x65psilon\x18\x03 \x01(\x02\x12\r\n\x05\x64\x65\x63\x61y\x18\x04 \x01(\x02\"\x90\x01\n\x0fOptimizerConfig\x12\"\n\x03sgd\x18\x01 \x01(\x0b\x32\x13.SgdOptimizerConfigH\x00\x12$\n\x04\x61\x64\x61m\x18\x02 \x01(\x0b\x32\x14.AdamOptimizerConfigH\x00\x12*\n\x07rmsprop\x18\x03 \x01(\x0b\x32\x17.RmspropOptimizerConfigH\x00\x42\x07\n\x05optimb\x06proto3')
)
_SGDOPTIMIZERCONFIG = _descriptor.Descriptor(
name='SgdOptimizerConfig',
full_name='SgdOptimizerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='lr', full_name='SgdOptimizerConfig.lr', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='decay', full_name='SgdOptimizerConfig.decay', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='momentum', full_name='SgdOptimizerConfig.momentum', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nesterov', full_name='SgdOptimizerConfig.nesterov', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=58,
serialized_end=141,
)
_ADAMOPTIMIZERCONFIG = _descriptor.Descriptor(
name='AdamOptimizerConfig',
full_name='AdamOptimizerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='lr', full_name='AdamOptimizerConfig.lr', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='beta_1', full_name='AdamOptimizerConfig.beta_1', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='beta_2', full_name='AdamOptimizerConfig.beta_2', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='epsilon', full_name='AdamOptimizerConfig.epsilon', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='decay', full_name='AdamOptimizerConfig.decay', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=143,
serialized_end=240,
)
_RMSPROPOPTIMIZERCONFIG = _descriptor.Descriptor(
name='RmspropOptimizerConfig',
full_name='RmspropOptimizerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='lr', full_name='RmspropOptimizerConfig.lr', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rho', full_name='RmspropOptimizerConfig.rho', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='epsilon', full_name='RmspropOptimizerConfig.epsilon', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='decay', full_name='RmspropOptimizerConfig.decay', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=242,
serialized_end=323,
)
_OPTIMIZERCONFIG = _descriptor.Descriptor(
name='OptimizerConfig',
full_name='OptimizerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='sgd', full_name='OptimizerConfig.sgd', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='adam', full_name='OptimizerConfig.adam', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rmsprop', full_name='OptimizerConfig.rmsprop', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='optim', full_name='OptimizerConfig.optim',
index=0, containing_type=None, fields=[]),
],
serialized_start=326,
serialized_end=470,
)
_OPTIMIZERCONFIG.fields_by_name['sgd'].message_type = _SGDOPTIMIZERCONFIG
_OPTIMIZERCONFIG.fields_by_name['adam'].message_type = _ADAMOPTIMIZERCONFIG
_OPTIMIZERCONFIG.fields_by_name['rmsprop'].message_type = _RMSPROPOPTIMIZERCONFIG
_OPTIMIZERCONFIG.oneofs_by_name['optim'].fields.append(
_OPTIMIZERCONFIG.fields_by_name['sgd'])
_OPTIMIZERCONFIG.fields_by_name['sgd'].containing_oneof = _OPTIMIZERCONFIG.oneofs_by_name['optim']
_OPTIMIZERCONFIG.oneofs_by_name['optim'].fields.append(
_OPTIMIZERCONFIG.fields_by_name['adam'])
_OPTIMIZERCONFIG.fields_by_name['adam'].containing_oneof = _OPTIMIZERCONFIG.oneofs_by_name['optim']
_OPTIMIZERCONFIG.oneofs_by_name['optim'].fields.append(
_OPTIMIZERCONFIG.fields_by_name['rmsprop'])
_OPTIMIZERCONFIG.fields_by_name['rmsprop'].containing_oneof = _OPTIMIZERCONFIG.oneofs_by_name['optim']
DESCRIPTOR.message_types_by_name['SgdOptimizerConfig'] = _SGDOPTIMIZERCONFIG
DESCRIPTOR.message_types_by_name['AdamOptimizerConfig'] = _ADAMOPTIMIZERCONFIG
DESCRIPTOR.message_types_by_name['RmspropOptimizerConfig'] = _RMSPROPOPTIMIZERCONFIG
DESCRIPTOR.message_types_by_name['OptimizerConfig'] = _OPTIMIZERCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SgdOptimizerConfig = _reflection.GeneratedProtocolMessageType('SgdOptimizerConfig', (_message.Message,), dict(
DESCRIPTOR = _SGDOPTIMIZERCONFIG,
__module__ = 'nvidia_tao_tf1.cv.makenet.proto.optimizer_config_pb2'
# @@protoc_insertion_point(class_scope:SgdOptimizerConfig)
))
_sym_db.RegisterMessage(SgdOptimizerConfig)
AdamOptimizerConfig = _reflection.GeneratedProtocolMessageType('AdamOptimizerConfig', (_message.Message,), dict(
DESCRIPTOR = _ADAMOPTIMIZERCONFIG,
__module__ = 'nvidia_tao_tf1.cv.makenet.proto.optimizer_config_pb2'
# @@protoc_insertion_point(class_scope:AdamOptimizerConfig)
))
_sym_db.RegisterMessage(AdamOptimizerConfig)
RmspropOptimizerConfig = _reflection.GeneratedProtocolMessageType('RmspropOptimizerConfig', (_message.Message,), dict(
DESCRIPTOR = _RMSPROPOPTIMIZERCONFIG,
__module__ = 'nvidia_tao_tf1.cv.makenet.proto.optimizer_config_pb2'
# @@protoc_insertion_point(class_scope:RmspropOptimizerConfig)
))
_sym_db.RegisterMessage(RmspropOptimizerConfig)
OptimizerConfig = _reflection.GeneratedProtocolMessageType('OptimizerConfig', (_message.Message,), dict(
DESCRIPTOR = _OPTIMIZERCONFIG,
__module__ = 'nvidia_tao_tf1.cv.makenet.proto.optimizer_config_pb2'
# @@protoc_insertion_point(class_scope:OptimizerConfig)
))
_sym_db.RegisterMessage(OptimizerConfig)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/makenet/proto/optimizer_config_pb2.py |
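The generated module above exposes OptimizerConfig with an 'optim' oneof covering sgd, adam, and rmsprop. Below is a minimal usage sketch, assuming the repo's packages are importable; the numeric values are illustrative only.
from google.protobuf import text_format
from nvidia_tao_tf1.cv.makenet.proto.optimizer_config_pb2 import OptimizerConfig

# Text-format snippet; field names come from the descriptor above.
spec = """
sgd {
  lr: 0.01
  decay: 0.0001
  momentum: 0.9
  nesterov: true
}
"""
config = OptimizerConfig()
text_format.Merge(spec, config)
# Only one branch of the 'optim' oneof can be populated at a time.
assert config.WhichOneof('optim') == 'sgd'
assert config.sgd.nesterov is True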
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/makenet/proto/training_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_tf1.cv.common.proto import visualizer_config_pb2 as nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_visualizer__config__pb2
from nvidia_tao_tf1.cv.makenet.proto import lr_config_pb2 as nvidia__tao__tf1_dot_cv_dot_makenet_dot_proto_dot_lr__config__pb2
from nvidia_tao_tf1.cv.makenet.proto import optimizer_config_pb2 as nvidia__tao__tf1_dot_cv_dot_makenet_dot_proto_dot_optimizer__config__pb2
from nvidia_tao_tf1.cv.makenet.proto import regularizer_config_pb2 as nvidia__tao__tf1_dot_cv_dot_makenet_dot_proto_dot_regularizer__config__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/makenet/proto/training_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n5nvidia_tao_tf1/cv/makenet/proto/training_config.proto\x1a\x36nvidia_tao_tf1/cv/common/proto/visualizer_config.proto\x1a/nvidia_tao_tf1/cv/makenet/proto/lr_config.proto\x1a\x36nvidia_tao_tf1/cv/makenet/proto/optimizer_config.proto\x1a\x38nvidia_tao_tf1/cv/makenet/proto/regularizer_config.proto\"\x83\x05\n\x0bTrainConfig\x12\x1a\n\x12train_dataset_path\x18\x01 \x01(\t\x12\x18\n\x10val_dataset_path\x18\x02 \x01(\t\x12\x1d\n\x15pretrained_model_path\x18\x03 \x01(\t\x12#\n\toptimizer\x18\x04 \x01(\x0b\x32\x10.OptimizerConfig\x12\x1a\n\x12\x62\x61tch_size_per_gpu\x18\x05 \x01(\r\x12\x10\n\x08n_epochs\x18\x06 \x01(\r\x12\x11\n\tn_workers\x18\x07 \x01(\r\x12\x1e\n\nreg_config\x18\x08 \x01(\x0b\x32\n.RegConfig\x12\x1c\n\tlr_config\x18\t \x01(\x0b\x32\t.LRConfig\x12\x13\n\x0brandom_seed\x18\n \x01(\r\x12\x1a\n\x12\x65nable_random_crop\x18\x0b \x01(\x08\x12\x1a\n\x12\x65nable_center_crop\x18\x0e \x01(\x08\x12!\n\x19\x65nable_color_augmentation\x18\x0f \x01(\x08\x12\x17\n\x0flabel_smoothing\x18\x0c \x01(\x02\x12\x17\n\x0fpreprocess_mode\x18\r \x01(\t\x12\x13\n\x0bmixup_alpha\x18\x10 \x01(\x02\x12\x19\n\x11model_parallelism\x18\x11 \x03(\x02\x12/\n\nimage_mean\x18\x12 \x03(\x0b\x32\x1b.TrainConfig.ImageMeanEntry\x12\x1f\n\x17\x64isable_horizontal_flip\x18\x13 \x01(\x08\x12%\n\nvisualizer\x18\x14 \x01(\x0b\x32\x11.VisualizerConfig\x1a\x30\n\x0eImageMeanEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x02:\x02\x38\x01\x62\x06proto3')
,
dependencies=[nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_visualizer__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_makenet_dot_proto_dot_lr__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_makenet_dot_proto_dot_optimizer__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_makenet_dot_proto_dot_regularizer__config__pb2.DESCRIPTOR,])
_TRAINCONFIG_IMAGEMEANENTRY = _descriptor.Descriptor(
name='ImageMeanEntry',
full_name='TrainConfig.ImageMeanEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='TrainConfig.ImageMeanEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='TrainConfig.ImageMeanEntry.value', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=872,
serialized_end=920,
)
_TRAINCONFIG = _descriptor.Descriptor(
name='TrainConfig',
full_name='TrainConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='train_dataset_path', full_name='TrainConfig.train_dataset_path', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='val_dataset_path', full_name='TrainConfig.val_dataset_path', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pretrained_model_path', full_name='TrainConfig.pretrained_model_path', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='optimizer', full_name='TrainConfig.optimizer', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batch_size_per_gpu', full_name='TrainConfig.batch_size_per_gpu', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='n_epochs', full_name='TrainConfig.n_epochs', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='n_workers', full_name='TrainConfig.n_workers', index=6,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reg_config', full_name='TrainConfig.reg_config', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lr_config', full_name='TrainConfig.lr_config', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='random_seed', full_name='TrainConfig.random_seed', index=9,
number=10, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_random_crop', full_name='TrainConfig.enable_random_crop', index=10,
number=11, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_center_crop', full_name='TrainConfig.enable_center_crop', index=11,
number=14, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_color_augmentation', full_name='TrainConfig.enable_color_augmentation', index=12,
number=15, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='label_smoothing', full_name='TrainConfig.label_smoothing', index=13,
number=12, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='preprocess_mode', full_name='TrainConfig.preprocess_mode', index=14,
number=13, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mixup_alpha', full_name='TrainConfig.mixup_alpha', index=15,
number=16, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='model_parallelism', full_name='TrainConfig.model_parallelism', index=16,
number=17, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_mean', full_name='TrainConfig.image_mean', index=17,
number=18, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='disable_horizontal_flip', full_name='TrainConfig.disable_horizontal_flip', index=18,
number=19, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='visualizer', full_name='TrainConfig.visualizer', index=19,
number=20, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_TRAINCONFIG_IMAGEMEANENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=277,
serialized_end=920,
)
_TRAINCONFIG_IMAGEMEANENTRY.containing_type = _TRAINCONFIG
_TRAINCONFIG.fields_by_name['optimizer'].message_type = nvidia__tao__tf1_dot_cv_dot_makenet_dot_proto_dot_optimizer__config__pb2._OPTIMIZERCONFIG
_TRAINCONFIG.fields_by_name['reg_config'].message_type = nvidia__tao__tf1_dot_cv_dot_makenet_dot_proto_dot_regularizer__config__pb2._REGCONFIG
_TRAINCONFIG.fields_by_name['lr_config'].message_type = nvidia__tao__tf1_dot_cv_dot_makenet_dot_proto_dot_lr__config__pb2._LRCONFIG
_TRAINCONFIG.fields_by_name['image_mean'].message_type = _TRAINCONFIG_IMAGEMEANENTRY
_TRAINCONFIG.fields_by_name['visualizer'].message_type = nvidia__tao__tf1_dot_cv_dot_common_dot_proto_dot_visualizer__config__pb2._VISUALIZERCONFIG
DESCRIPTOR.message_types_by_name['TrainConfig'] = _TRAINCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TrainConfig = _reflection.GeneratedProtocolMessageType('TrainConfig', (_message.Message,), dict(
ImageMeanEntry = _reflection.GeneratedProtocolMessageType('ImageMeanEntry', (_message.Message,), dict(
DESCRIPTOR = _TRAINCONFIG_IMAGEMEANENTRY,
__module__ = 'nvidia_tao_tf1.cv.makenet.proto.training_config_pb2'
# @@protoc_insertion_point(class_scope:TrainConfig.ImageMeanEntry)
))
,
DESCRIPTOR = _TRAINCONFIG,
__module__ = 'nvidia_tao_tf1.cv.makenet.proto.training_config_pb2'
# @@protoc_insertion_point(class_scope:TrainConfig)
))
_sym_db.RegisterMessage(TrainConfig)
_sym_db.RegisterMessage(TrainConfig.ImageMeanEntry)
_TRAINCONFIG_IMAGEMEANENTRY._options = None
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/makenet/proto/training_config_pb2.py |
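A sketch of populating TrainConfig programmatically. The field names mirror the descriptor above; the paths, hyperparameter values, and the 'b'/'g'/'r' keys used for the image_mean map are assumptions for illustration, not values required by the schema.
from nvidia_tao_tf1.cv.makenet.proto.training_config_pb2 import TrainConfig

cfg = TrainConfig()
cfg.train_dataset_path = "/data/train"      # placeholder paths
cfg.val_dataset_path = "/data/val"
cfg.batch_size_per_gpu = 64
cfg.n_epochs = 80
cfg.preprocess_mode = "caffe"
cfg.optimizer.sgd.lr = 0.01                 # selects the 'optim' oneof branch
cfg.lr_config.step.learning_rate = 0.01     # selects the 'lr_scheduler' oneof branch
cfg.lr_config.step.step_size = 10
cfg.reg_config.type = "L2"
cfg.reg_config.weight_decay = 5e-5
# image_mean is a proto3 map<string, float>; the channel keys are assumed here.
cfg.image_mean["b"] = 103.939
cfg.image_mean["g"] = 116.779
cfg.image_mean["r"] = 123.68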
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/makenet/proto/regularizer_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/makenet/proto/regularizer_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n8nvidia_tao_tf1/cv/makenet/proto/regularizer_config.proto\">\n\tRegConfig\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\r\n\x05scope\x18\x02 \x01(\t\x12\x14\n\x0cweight_decay\x18\x03 \x01(\x02\x62\x06proto3')
)
_REGCONFIG = _descriptor.Descriptor(
name='RegConfig',
full_name='RegConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='RegConfig.type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scope', full_name='RegConfig.scope', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='weight_decay', full_name='RegConfig.weight_decay', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=60,
serialized_end=122,
)
DESCRIPTOR.message_types_by_name['RegConfig'] = _REGCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
RegConfig = _reflection.GeneratedProtocolMessageType('RegConfig', (_message.Message,), dict(
DESCRIPTOR = _REGCONFIG,
__module__ = 'nvidia_tao_tf1.cv.makenet.proto.regularizer_config_pb2'
# @@protoc_insertion_point(class_scope:RegConfig)
))
_sym_db.RegisterMessage(RegConfig)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/makenet/proto/regularizer_config_pb2.py |
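RegConfig is a small flat message; generated message classes also accept field values as keyword arguments, so a regularizer config can be built in one call (the values shown are illustrative).
from nvidia_tao_tf1.cv.makenet.proto.regularizer_config_pb2 import RegConfig

# 'type', 'scope', and 'weight_decay' are the three fields declared above.
reg = RegConfig(type="L2", scope="Conv2D,Dense", weight_decay=5e-5)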
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/makenet/proto/lr_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/makenet/proto/lr_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n/nvidia_tao_tf1/cv/makenet/proto/lr_config.proto\"G\n\x0cStepLrConfig\x12\x15\n\rlearning_rate\x18\x01 \x01(\x02\x12\x11\n\tstep_size\x18\x02 \x01(\r\x12\r\n\x05gamma\x18\x03 \x01(\x02\"t\n\x12SoftAnnealLrConfig\x12\x15\n\rlearning_rate\x18\x01 \x01(\x02\x12\x12\n\nsoft_start\x18\x02 \x01(\x02\x12\x19\n\x11\x61nnealing_divider\x18\x03 \x01(\x02\x12\x18\n\x10\x61nnealing_points\x18\x07 \x03(\x02\"Q\n\x0e\x43osineLrConfig\x12\x15\n\rlearning_rate\x18\x01 \x01(\x02\x12\x14\n\x0cmin_lr_ratio\x18\x02 \x01(\x02\x12\x12\n\nsoft_start\x18\x03 \x01(\x02\"\x88\x01\n\x08LRConfig\x12\x1d\n\x04step\x18\x01 \x01(\x0b\x32\r.StepLrConfigH\x00\x12*\n\x0bsoft_anneal\x18\x02 \x01(\x0b\x32\x13.SoftAnnealLrConfigH\x00\x12!\n\x06\x63osine\x18\x03 \x01(\x0b\x32\x0f.CosineLrConfigH\x00\x42\x0e\n\x0clr_schedulerb\x06proto3')
)
_STEPLRCONFIG = _descriptor.Descriptor(
name='StepLrConfig',
full_name='StepLrConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='learning_rate', full_name='StepLrConfig.learning_rate', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='step_size', full_name='StepLrConfig.step_size', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gamma', full_name='StepLrConfig.gamma', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=51,
serialized_end=122,
)
_SOFTANNEALLRCONFIG = _descriptor.Descriptor(
name='SoftAnnealLrConfig',
full_name='SoftAnnealLrConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='learning_rate', full_name='SoftAnnealLrConfig.learning_rate', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='soft_start', full_name='SoftAnnealLrConfig.soft_start', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='annealing_divider', full_name='SoftAnnealLrConfig.annealing_divider', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='annealing_points', full_name='SoftAnnealLrConfig.annealing_points', index=3,
number=7, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=124,
serialized_end=240,
)
_COSINELRCONFIG = _descriptor.Descriptor(
name='CosineLrConfig',
full_name='CosineLrConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='learning_rate', full_name='CosineLrConfig.learning_rate', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_lr_ratio', full_name='CosineLrConfig.min_lr_ratio', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='soft_start', full_name='CosineLrConfig.soft_start', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=242,
serialized_end=323,
)
_LRCONFIG = _descriptor.Descriptor(
name='LRConfig',
full_name='LRConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='step', full_name='LRConfig.step', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='soft_anneal', full_name='LRConfig.soft_anneal', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cosine', full_name='LRConfig.cosine', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='lr_scheduler', full_name='LRConfig.lr_scheduler',
index=0, containing_type=None, fields=[]),
],
serialized_start=326,
serialized_end=462,
)
_LRCONFIG.fields_by_name['step'].message_type = _STEPLRCONFIG
_LRCONFIG.fields_by_name['soft_anneal'].message_type = _SOFTANNEALLRCONFIG
_LRCONFIG.fields_by_name['cosine'].message_type = _COSINELRCONFIG
_LRCONFIG.oneofs_by_name['lr_scheduler'].fields.append(
_LRCONFIG.fields_by_name['step'])
_LRCONFIG.fields_by_name['step'].containing_oneof = _LRCONFIG.oneofs_by_name['lr_scheduler']
_LRCONFIG.oneofs_by_name['lr_scheduler'].fields.append(
_LRCONFIG.fields_by_name['soft_anneal'])
_LRCONFIG.fields_by_name['soft_anneal'].containing_oneof = _LRCONFIG.oneofs_by_name['lr_scheduler']
_LRCONFIG.oneofs_by_name['lr_scheduler'].fields.append(
_LRCONFIG.fields_by_name['cosine'])
_LRCONFIG.fields_by_name['cosine'].containing_oneof = _LRCONFIG.oneofs_by_name['lr_scheduler']
DESCRIPTOR.message_types_by_name['StepLrConfig'] = _STEPLRCONFIG
DESCRIPTOR.message_types_by_name['SoftAnnealLrConfig'] = _SOFTANNEALLRCONFIG
DESCRIPTOR.message_types_by_name['CosineLrConfig'] = _COSINELRCONFIG
DESCRIPTOR.message_types_by_name['LRConfig'] = _LRCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
StepLrConfig = _reflection.GeneratedProtocolMessageType('StepLrConfig', (_message.Message,), dict(
DESCRIPTOR = _STEPLRCONFIG,
__module__ = 'nvidia_tao_tf1.cv.makenet.proto.lr_config_pb2'
# @@protoc_insertion_point(class_scope:StepLrConfig)
))
_sym_db.RegisterMessage(StepLrConfig)
SoftAnnealLrConfig = _reflection.GeneratedProtocolMessageType('SoftAnnealLrConfig', (_message.Message,), dict(
DESCRIPTOR = _SOFTANNEALLRCONFIG,
__module__ = 'nvidia_tao_tf1.cv.makenet.proto.lr_config_pb2'
# @@protoc_insertion_point(class_scope:SoftAnnealLrConfig)
))
_sym_db.RegisterMessage(SoftAnnealLrConfig)
CosineLrConfig = _reflection.GeneratedProtocolMessageType('CosineLrConfig', (_message.Message,), dict(
DESCRIPTOR = _COSINELRCONFIG,
__module__ = 'nvidia_tao_tf1.cv.makenet.proto.lr_config_pb2'
# @@protoc_insertion_point(class_scope:CosineLrConfig)
))
_sym_db.RegisterMessage(CosineLrConfig)
LRConfig = _reflection.GeneratedProtocolMessageType('LRConfig', (_message.Message,), dict(
DESCRIPTOR = _LRCONFIG,
__module__ = 'nvidia_tao_tf1.cv.makenet.proto.lr_config_pb2'
# @@protoc_insertion_point(class_scope:LRConfig)
))
_sym_db.RegisterMessage(LRConfig)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/makenet/proto/lr_config_pb2.py |
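LRConfig wraps the three scheduler messages behind an 'lr_scheduler' oneof. A sketch of selecting the cosine branch from a text-format snippet (values are illustrative):
from google.protobuf import text_format
from nvidia_tao_tf1.cv.makenet.proto.lr_config_pb2 import LRConfig

lr_cfg = LRConfig()
text_format.Merge("""
cosine {
  learning_rate: 0.05
  min_lr_ratio: 0.001
  soft_start: 0.1
}
""", lr_cfg)
# Exactly one of 'step', 'soft_anneal', or 'cosine' can be set.
assert lr_cfg.WhichOneof('lr_scheduler') == 'cosine'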
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Store protobuf definitions for MakeNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/makenet/proto/__init__.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/makenet/proto/experiment.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_tf1.cv.makenet.proto import model_config_pb2 as nvidia__tao__tf1_dot_cv_dot_makenet_dot_proto_dot_model__config__pb2
from nvidia_tao_tf1.cv.makenet.proto import training_config_pb2 as nvidia__tao__tf1_dot_cv_dot_makenet_dot_proto_dot_training__config__pb2
from nvidia_tao_tf1.cv.makenet.proto import eval_config_pb2 as nvidia__tao__tf1_dot_cv_dot_makenet_dot_proto_dot_eval__config__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/makenet/proto/experiment.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n0nvidia_tao_tf1/cv/makenet/proto/experiment.proto\x1a\x32nvidia_tao_tf1/cv/makenet/proto/model_config.proto\x1a\x35nvidia_tao_tf1/cv/makenet/proto/training_config.proto\x1a\x31nvidia_tao_tf1/cv/makenet/proto/eval_config.proto\"v\n\nExperiment\x12 \n\x0b\x65val_config\x18\x01 \x01(\x0b\x32\x0b.EvalConfig\x12\"\n\x0cmodel_config\x18\x02 \x01(\x0b\x32\x0c.ModelConfig\x12\"\n\x0ctrain_config\x18\x03 \x01(\x0b\x32\x0c.TrainConfigb\x06proto3')
,
dependencies=[nvidia__tao__tf1_dot_cv_dot_makenet_dot_proto_dot_model__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_makenet_dot_proto_dot_training__config__pb2.DESCRIPTOR,nvidia__tao__tf1_dot_cv_dot_makenet_dot_proto_dot_eval__config__pb2.DESCRIPTOR,])
_EXPERIMENT = _descriptor.Descriptor(
name='Experiment',
full_name='Experiment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='eval_config', full_name='Experiment.eval_config', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='model_config', full_name='Experiment.model_config', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='train_config', full_name='Experiment.train_config', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=210,
serialized_end=328,
)
_EXPERIMENT.fields_by_name['eval_config'].message_type = nvidia__tao__tf1_dot_cv_dot_makenet_dot_proto_dot_eval__config__pb2._EVALCONFIG
_EXPERIMENT.fields_by_name['model_config'].message_type = nvidia__tao__tf1_dot_cv_dot_makenet_dot_proto_dot_model__config__pb2._MODELCONFIG
_EXPERIMENT.fields_by_name['train_config'].message_type = nvidia__tao__tf1_dot_cv_dot_makenet_dot_proto_dot_training__config__pb2._TRAINCONFIG
DESCRIPTOR.message_types_by_name['Experiment'] = _EXPERIMENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Experiment = _reflection.GeneratedProtocolMessageType('Experiment', (_message.Message,), dict(
DESCRIPTOR = _EXPERIMENT,
__module__ = 'nvidia_tao_tf1.cv.makenet.proto.experiment_pb2'
# @@protoc_insertion_point(class_scope:Experiment)
))
_sym_db.RegisterMessage(Experiment)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/makenet/proto/experiment_pb2.py |
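Experiment is the top-level spec message that composes eval_config, model_config, and train_config. The repo ships its own spec-handling utilities; the loader below is only a sketch of how such a text-format spec could be parsed, and the file name is made up.
from google.protobuf import text_format
from nvidia_tao_tf1.cv.makenet.proto.experiment_pb2 import Experiment

def load_experiment_spec(spec_path):
    """Parse a text-format spec file into an Experiment message."""
    experiment = Experiment()
    with open(spec_path, "r") as spec_file:
        text_format.Merge(spec_file.read(), experiment)
    return experiment

# spec = load_experiment_spec("classification_spec.cfg")  # hypothetical file name
# print(spec.model_config.arch, spec.train_config.n_epochs)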
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/makenet/proto/eval_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/makenet/proto/eval_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n1nvidia_tao_tf1/cv/makenet/proto/eval_config.proto\"\x8d\x01\n\nEvalConfig\x12\r\n\x05top_k\x18\x01 \x01(\r\x12\x19\n\x11\x65val_dataset_path\x18\x02 \x01(\t\x12\x12\n\nmodel_path\x18\x03 \x01(\t\x12\x12\n\nbatch_size\x18\x04 \x01(\r\x12\x11\n\tn_workers\x18\x05 \x01(\r\x12\x1a\n\x12\x65nable_center_crop\x18\x06 \x01(\x08\x62\x06proto3')
)
_EVALCONFIG = _descriptor.Descriptor(
name='EvalConfig',
full_name='EvalConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='top_k', full_name='EvalConfig.top_k', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eval_dataset_path', full_name='EvalConfig.eval_dataset_path', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='model_path', full_name='EvalConfig.model_path', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batch_size', full_name='EvalConfig.batch_size', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='n_workers', full_name='EvalConfig.n_workers', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_center_crop', full_name='EvalConfig.enable_center_crop', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=54,
serialized_end=195,
)
DESCRIPTOR.message_types_by_name['EvalConfig'] = _EVALCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
EvalConfig = _reflection.GeneratedProtocolMessageType('EvalConfig', (_message.Message,), dict(
DESCRIPTOR = _EVALCONFIG,
__module__ = 'nvidia_tao_tf1.cv.makenet.proto.eval_config_pb2'
# @@protoc_insertion_point(class_scope:EvalConfig)
))
_sym_db.RegisterMessage(EvalConfig)
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/makenet/proto/eval_config_pb2.py |
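EvalConfig is another flat message; an illustrative instance built with keyword arguments (paths and values are placeholders):
from nvidia_tao_tf1.cv.makenet.proto.eval_config_pb2 import EvalConfig

eval_cfg = EvalConfig(
    top_k=3,
    eval_dataset_path="/data/val",            # placeholder path
    model_path="/results/weights/model.tlt",  # placeholder path
    batch_size=256,
    n_workers=8,
    enable_center_crop=True,
)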
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_tf1/cv/makenet/proto/model_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_tf1/cv/makenet/proto/model_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n2nvidia_tao_tf1/cv/makenet/proto/model_config.proto\"4\n\x0f\x42\x61tchNormConfig\x12\x10\n\x08momentum\x18\x01 \x01(\x02\x12\x0f\n\x07\x65psilon\x18\x02 \x01(\x02\"\xa8\x01\n\nActivation\x12\x17\n\x0f\x61\x63tivation_type\x18\x01 \x01(\t\x12\x44\n\x15\x61\x63tivation_parameters\x18\x02 \x03(\x0b\x32%.Activation.ActivationParametersEntry\x1a;\n\x19\x41\x63tivationParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x02:\x02\x38\x01\"\x8c\x03\n\x0bModelConfig\x12\x0c\n\x04\x61rch\x18\x01 \x01(\t\x12\x18\n\x10input_image_size\x18\x02 \x01(\t\x12\x39\n\x1bresize_interpolation_method\x18\x0c \x01(\x0e\x32\x14.InterpolationMethod\x12\x10\n\x08n_layers\x18\x03 \x01(\r\x12\x13\n\x0bretain_head\x18\x04 \x01(\x08\x12\x16\n\x0euse_batch_norm\x18\x05 \x01(\x08\x12\x10\n\x08use_bias\x18\x06 \x01(\x08\x12\x13\n\x0buse_pooling\x18\x07 \x01(\x08\x12\x17\n\x0f\x61ll_projections\x18\x08 \x01(\x08\x12\x11\n\tfreeze_bn\x18\t \x01(\x08\x12\x15\n\rfreeze_blocks\x18\n \x03(\r\x12\x0f\n\x07\x64ropout\x18\x0b \x01(\x02\x12+\n\x11\x62\x61tch_norm_config\x18\r \x01(\x0b\x32\x10.BatchNormConfig\x12\x1f\n\nactivation\x18\x0e \x01(\x0b\x32\x0b.Activation\x12\x12\n\nbyom_model\x18\x0f \x01(\t*0\n\x13InterpolationMethod\x12\x0c\n\x08\x42ILINEAR\x10\x00\x12\x0b\n\x07\x42ICUBIC\x10\x01\x62\x06proto3')
)
_INTERPOLATIONMETHOD = _descriptor.EnumDescriptor(
name='InterpolationMethod',
full_name='InterpolationMethod',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='BILINEAR', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BICUBIC', index=1, number=1,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=678,
serialized_end=726,
)
_sym_db.RegisterEnumDescriptor(_INTERPOLATIONMETHOD)
InterpolationMethod = enum_type_wrapper.EnumTypeWrapper(_INTERPOLATIONMETHOD)
BILINEAR = 0
BICUBIC = 1
_BATCHNORMCONFIG = _descriptor.Descriptor(
name='BatchNormConfig',
full_name='BatchNormConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='momentum', full_name='BatchNormConfig.momentum', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='epsilon', full_name='BatchNormConfig.epsilon', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=54,
serialized_end=106,
)
_ACTIVATION_ACTIVATIONPARAMETERSENTRY = _descriptor.Descriptor(
name='ActivationParametersEntry',
full_name='Activation.ActivationParametersEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='Activation.ActivationParametersEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='Activation.ActivationParametersEntry.value', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=218,
serialized_end=277,
)
_ACTIVATION = _descriptor.Descriptor(
name='Activation',
full_name='Activation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='activation_type', full_name='Activation.activation_type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='activation_parameters', full_name='Activation.activation_parameters', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_ACTIVATION_ACTIVATIONPARAMETERSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=109,
serialized_end=277,
)
_MODELCONFIG = _descriptor.Descriptor(
name='ModelConfig',
full_name='ModelConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='arch', full_name='ModelConfig.arch', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='input_image_size', full_name='ModelConfig.input_image_size', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resize_interpolation_method', full_name='ModelConfig.resize_interpolation_method', index=2,
number=12, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='n_layers', full_name='ModelConfig.n_layers', index=3,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='retain_head', full_name='ModelConfig.retain_head', index=4,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='use_batch_norm', full_name='ModelConfig.use_batch_norm', index=5,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='use_bias', full_name='ModelConfig.use_bias', index=6,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='use_pooling', full_name='ModelConfig.use_pooling', index=7,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='all_projections', full_name='ModelConfig.all_projections', index=8,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freeze_bn', full_name='ModelConfig.freeze_bn', index=9,
number=9, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freeze_blocks', full_name='ModelConfig.freeze_blocks', index=10,
number=10, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dropout', full_name='ModelConfig.dropout', index=11,
number=11, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batch_norm_config', full_name='ModelConfig.batch_norm_config', index=12,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='activation', full_name='ModelConfig.activation', index=13,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='byom_model', full_name='ModelConfig.byom_model', index=14,
number=15, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=280,
serialized_end=676,
)
_ACTIVATION_ACTIVATIONPARAMETERSENTRY.containing_type = _ACTIVATION
_ACTIVATION.fields_by_name['activation_parameters'].message_type = _ACTIVATION_ACTIVATIONPARAMETERSENTRY
_MODELCONFIG.fields_by_name['resize_interpolation_method'].enum_type = _INTERPOLATIONMETHOD
_MODELCONFIG.fields_by_name['batch_norm_config'].message_type = _BATCHNORMCONFIG
_MODELCONFIG.fields_by_name['activation'].message_type = _ACTIVATION
DESCRIPTOR.message_types_by_name['BatchNormConfig'] = _BATCHNORMCONFIG
DESCRIPTOR.message_types_by_name['Activation'] = _ACTIVATION
DESCRIPTOR.message_types_by_name['ModelConfig'] = _MODELCONFIG
DESCRIPTOR.enum_types_by_name['InterpolationMethod'] = _INTERPOLATIONMETHOD
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
BatchNormConfig = _reflection.GeneratedProtocolMessageType('BatchNormConfig', (_message.Message,), dict(
DESCRIPTOR = _BATCHNORMCONFIG,
__module__ = 'nvidia_tao_tf1.cv.makenet.proto.model_config_pb2'
# @@protoc_insertion_point(class_scope:BatchNormConfig)
))
_sym_db.RegisterMessage(BatchNormConfig)
Activation = _reflection.GeneratedProtocolMessageType('Activation', (_message.Message,), dict(
ActivationParametersEntry = _reflection.GeneratedProtocolMessageType('ActivationParametersEntry', (_message.Message,), dict(
DESCRIPTOR = _ACTIVATION_ACTIVATIONPARAMETERSENTRY,
__module__ = 'nvidia_tao_tf1.cv.makenet.proto.model_config_pb2'
# @@protoc_insertion_point(class_scope:Activation.ActivationParametersEntry)
))
,
DESCRIPTOR = _ACTIVATION,
__module__ = 'nvidia_tao_tf1.cv.makenet.proto.model_config_pb2'
# @@protoc_insertion_point(class_scope:Activation)
))
_sym_db.RegisterMessage(Activation)
_sym_db.RegisterMessage(Activation.ActivationParametersEntry)
ModelConfig = _reflection.GeneratedProtocolMessageType('ModelConfig', (_message.Message,), dict(
DESCRIPTOR = _MODELCONFIG,
__module__ = 'nvidia_tao_tf1.cv.makenet.proto.model_config_pb2'
# @@protoc_insertion_point(class_scope:ModelConfig)
))
_sym_db.RegisterMessage(ModelConfig)
_ACTIVATION_ACTIVATIONPARAMETERSENTRY._options = None
# @@protoc_insertion_point(module_scope)
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/makenet/proto/model_config_pb2.py |
"""Utilities for ImageNet data preprocessing & prediction decoding."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from keras import backend as K
import numpy as np
logger = logging.getLogger(__name__)
CLASS_INDEX = None
CLASS_INDEX_PATH = ('https://storage.googleapis.com/download.tensorflow.org/'
'data/imagenet_class_index.json')
# Global tensor of imagenet mean for preprocessing symbolic inputs
_IMAGENET_MEAN = None
# Keras constants.
_KERAS_BACKEND = K
_KERAS_LAYERS = None
_KERAS_MODELS = None
_KERAS_UTILS = None
def get_submodules_from_kwargs(kwargs):
"""Simple function to extract submodules from kwargs."""
backend = kwargs.get('backend', _KERAS_BACKEND)
layers = kwargs.get('layers', _KERAS_LAYERS)
models = kwargs.get('models', _KERAS_MODELS)
utils = kwargs.get('utils', _KERAS_UTILS)
for key in list(kwargs.keys()):
if key not in ['backend', 'layers', 'models', 'utils']:
raise TypeError('Invalid keyword argument: {}'.format(key))
return backend, layers, models, utils
def _preprocess_numpy_input(x, data_format, mode, color_mode, img_mean, **kwargs):
"""Preprocesses a Numpy array encoding a batch of images.
# Arguments
x: Input array, 3D or 4D.
data_format: Data format of the image array.
mode: One of "caffe", "tf" or "torch".
- caffe: will convert the images from RGB to BGR,
then will zero-center each color channel with
respect to the ImageNet dataset,
without scaling.
- tf: will scale pixels between -1 and 1,
sample-wise.
- torch: will scale pixels between 0 and 1 and then
will normalize each channel with respect to the
ImageNet dataset.
# Returns
Preprocessed Numpy array.
"""
backend, _, _, _ = get_submodules_from_kwargs(kwargs)
if not issubclass(x.dtype.type, np.floating):
x = x.astype(backend.floatx(), copy=False)
if mode == 'tf':
if img_mean and len(img_mean) > 0:
logger.debug("image_mean is ignored in tf mode.")
x /= 127.5
x -= 1.
return x
if mode == 'torch':
if img_mean and len(img_mean) > 0:
logger.debug("image_mean is ignored in torch mode.")
x /= 255.
if color_mode == "rgb":
mean = [0.485, 0.456, 0.406]
            # ImageNet per-channel std; kept in sync with _preprocess_symbolic_input below.
            std = [0.229, 0.224, 0.225]
elif color_mode == "grayscale":
mean = [0.449]
            std = [0.226]
else:
raise NotImplementedError("Invalid color mode: {}".format(color_mode))
else:
if color_mode == "rgb":
if data_format == 'channels_first':
# 'RGB'->'BGR'
if x.ndim == 3:
x = x[::-1, ...]
else:
x = x[:, ::-1, ...]
else:
# 'RGB'->'BGR'
x = x[..., ::-1]
if not img_mean:
mean = [103.939, 116.779, 123.68]
else:
assert len(img_mean) == 3, "image_mean must be a list of 3 values \
for RGB input."
mean = img_mean
std = None
else:
if not img_mean:
mean = [117.3786]
else:
assert len(img_mean) == 1, "image_mean must be a list of a single value \
for gray image input."
mean = img_mean
std = None
# Zero-center by mean pixel
if data_format == 'channels_first':
for idx in range(len(mean)):
if x.ndim == 3:
x[idx, :, :] -= mean[idx]
if std is not None:
x[idx, :, :] /= std[idx]
else:
x[:, idx, :, :] -= mean[idx]
if std is not None:
x[:, idx, :, :] /= std[idx]
else:
for idx in range(len(mean)):
x[..., idx] -= mean[idx]
if std is not None:
x[..., idx] /= std[idx]
return x
def _preprocess_symbolic_input(x, data_format, mode, color_mode, img_mean, **kwargs):
"""Preprocesses a tensor encoding a batch of images.
# Arguments
x: Input tensor, 3D or 4D.
data_format: Data format of the image tensor.
mode: One of "caffe", "tf" or "torch".
- caffe: will convert the images from RGB to BGR,
then will zero-center each color channel with
respect to the ImageNet dataset,
without scaling.
- tf: will scale pixels between -1 and 1,
sample-wise.
- torch: will scale pixels between 0 and 1 and then
will normalize each channel with respect to the
ImageNet dataset.
# Returns
Preprocessed tensor.
"""
global _IMAGENET_MEAN # noqa pylint: disable=global-statement
backend, _, _, _ = get_submodules_from_kwargs(kwargs)
if mode == 'tf':
if img_mean and len(img_mean) > 0:
logger.debug("image_mean is ignored in tf mode.")
x /= 127.5
x -= 1.
return x
if mode == 'torch':
if img_mean and len(img_mean) > 0:
logger.debug("image_mean is ignored in torch mode.")
x /= 255.
if color_mode == "rgb":
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
elif color_mode == "grayscale":
mean = [0.449]
std = [0.226]
else:
raise NotImplementedError("Invalid color mode: {}".format(color_mode))
else:
if color_mode == "rgb":
if data_format == 'channels_first':
# 'RGB'->'BGR'
if backend.ndim(x) == 3:
x = x[::-1, ...]
else:
x = x[:, ::-1, ...]
else:
# 'RGB'->'BGR'
x = x[..., ::-1]
if not img_mean:
mean = [103.939, 116.779, 123.68]
else:
assert len(img_mean) == 3, "image_mean must be a list of 3 values \
for RGB input."
mean = img_mean
std = None
else:
if not img_mean:
mean = [117.3786]
else:
assert len(img_mean) == 1, "image_mean must be a list of a single value \
for gray image input."
mean = img_mean
std = None
if _IMAGENET_MEAN is None:
_IMAGENET_MEAN = backend.constant(-np.array(mean))
# Zero-center by mean pixel
if backend.dtype(x) != backend.dtype(_IMAGENET_MEAN):
x = backend.bias_add(
x, backend.cast(_IMAGENET_MEAN, backend.dtype(x)),
data_format=data_format)
else:
x = backend.bias_add(x, _IMAGENET_MEAN, data_format)
if std is not None:
x /= std
return x
def preprocess_input(x, data_format=None, mode='caffe', color_mode="rgb", img_mean=None, **kwargs):
"""Preprocesses a tensor or Numpy array encoding a batch of images.
# Arguments
x: Input Numpy or symbolic tensor, 3D or 4D.
The preprocessed data is written over the input data
if the data types are compatible. To avoid this
behaviour, `numpy.copy(x)` can be used.
data_format: Data format of the image tensor/array.
mode: One of "caffe", "tf" or "torch".
- caffe: will convert the images from RGB to BGR,
then will zero-center each color channel with
respect to the ImageNet dataset,
without scaling.
- tf: will scale pixels between -1 and 1,
sample-wise.
- torch: will scale pixels between 0 and 1 and then
will normalize each channel with respect to the
ImageNet dataset.
# Returns
Preprocessed tensor or Numpy array.
# Raises
ValueError: In case of unknown `data_format` argument.
"""
backend, _, _, _ = get_submodules_from_kwargs(kwargs)
if data_format is None:
data_format = backend.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
if isinstance(x, np.ndarray):
return _preprocess_numpy_input(x, data_format=data_format,
mode=mode, color_mode=color_mode,
img_mean=img_mean, **kwargs)
return _preprocess_symbolic_input(x, data_format=data_format,
mode=mode, color_mode=color_mode,
img_mean=img_mean, **kwargs)
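# Minimal usage sketch (illustrative shapes and values, not shipped fixtures):
#   import numpy as np
#   batch = np.random.randint(0, 256, size=(4, 3, 224, 224)).astype("float32")
#   out = preprocess_input(batch, data_format="channels_first", mode="caffe",
#                          color_mode="rgb")
#   # "caffe" mode flips RGB->BGR and subtracts the per-channel mean without scaling.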
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/makenet/utils/preprocess_input.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilitiy module for MakeNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/makenet/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Patch keras_preprocessing.image.utils.load_img() with cropping support."""
import random
import keras_preprocessing.image
from nvidia_tao_tf1.cv.makenet.utils.helper import color_augmentation
# padding size.
# We first resize to (target_width + CROP_PADDING, target_height + CROP_PADDING),
# then crop to (target_width, target_height).
# for standard ImageNet size: 224x224 the ratio is 0.875(224 / (224 + 32)).
# but for EfficientNet B1-B7, larger resolution is used, hence this ratio
# is no longer 0.875
# ref:
# https://github.com/tensorflow/tpu/blob/r1.15/models/official/efficientnet/preprocessing.py#L110
CROP_PADDING = 32
COLOR_AUGMENTATION = False
def _set_color_augmentation(flag):
global COLOR_AUGMENTATION # pylint: disable=global-statement
COLOR_AUGMENTATION = flag
def load_and_crop_img(path, grayscale=False, color_mode='rgb', target_size=None,
interpolation='nearest'):
"""Wraps keras_preprocessing.image.utils.load_img() and adds cropping.
    Cropping method enumerated in the interpolation string.
# Arguments
path: Path to image file.
color_mode: One of "grayscale", "rgb", "rgba". Default: "rgb".
The desired image format.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation and crop methods used to resample and crop the image
if the target size is different from that of the loaded image.
Methods are delimited by ":" where first part is interpolation and second is crop
e.g. "lanczos:random".
Supported interpolation methods are "nearest", "bilinear", "bicubic", "lanczos",
"box", "hamming" By default, "nearest" is used.
Supported crop methods are "none", "center", "random".
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
"""
# Decode interpolation string. Allowed Crop methods: none, center, random
interpolation, crop = interpolation.split(":") \
if ":" in interpolation else (interpolation, "none")
if crop == "none":
return keras_preprocessing.image.utils.load_img(
path,
grayscale=grayscale,
color_mode=color_mode,
target_size=target_size,
interpolation=interpolation)
# Load original size image using Keras
img = keras_preprocessing.image.utils.load_img(
path,
grayscale=grayscale,
color_mode=color_mode,
target_size=None,
interpolation=interpolation)
    # Crop a fraction of the total image.
    if target_size is not None:
        target_width = target_size[1]
        target_height = target_size[0]
        if img.size != (target_width, target_height):
if crop not in ["center", "random"]:
raise ValueError('Invalid crop method %s specified.' % crop)
if interpolation not in keras_preprocessing.image.utils._PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
", ".join(
keras_preprocessing.image.utils._PIL_INTERPOLATION_METHODS.keys())))
resample = keras_preprocessing.image.utils._PIL_INTERPOLATION_METHODS[interpolation]
width, height = img.size
if crop == 'random':
# Resize keeping aspect ratio
                # result should be no smaller than the target size, including crop fraction overhead
crop_fraction = random.uniform(0.45, 1.0)
target_size_before_crop = (
target_width / crop_fraction,
target_height / crop_fraction
)
ratio = max(
target_size_before_crop[0] / width,
target_size_before_crop[1] / height
)
target_size_before_crop_keep_ratio = int(width * ratio), int(height * ratio)
img = img.resize(target_size_before_crop_keep_ratio, resample=resample)
if crop == 'center':
# Resize keeping aspect ratio
                # result should be no smaller than the target size, including crop fraction overhead
target_size_before_crop = (
target_width + CROP_PADDING,
target_height + CROP_PADDING
)
ratio = max(
target_size_before_crop[0] / width,
target_size_before_crop[1] / height
)
target_size_before_crop_keep_ratio = int(width * ratio), int(height * ratio)
img = img.resize(target_size_before_crop_keep_ratio, resample=resample)
width, height = img.size
if crop == "center":
left_corner = int(round(width/2)) - int(round(target_width/2))
top_corner = int(round(height/2)) - int(round(target_height/2))
return img.crop(
(left_corner,
top_corner,
left_corner + target_width,
top_corner + target_height))
if crop == "random":
# random crop
left_shift = random.randint(0, int((width - target_width)))
down_shift = random.randint(0, int((height - target_height)))
img = img.crop(
(left_shift,
down_shift,
target_width + left_shift,
target_height + down_shift))
# color augmentation
if COLOR_AUGMENTATION and img.mode == "RGB":
return color_augmentation(img)
return img
raise ValueError("Crop mode not supported.")
return img
# Monkey patch
keras_preprocessing.image.iterator.load_img = load_and_crop_img
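# Usage sketch (not shipped code): because load_img is monkey-patched above, the crop
# method is selected through the interpolation string passed to Keras iterators, e.g.
#   flow_from_directory(..., interpolation="bilinear:random")   # random crop (training)
#   flow_from_directory(..., interpolation="bicubic:center")    # center crop (evaluation)
#   flow_from_directory(..., interpolation="nearest")           # plain resize, no crop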
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/makenet/utils/preprocess_crop.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mixup augmentation."""
import numpy as np
class MixupImageDataGenerator():
"""Mixup image generator."""
def __init__(
self, generator, directory, batch_size,
img_height, img_width, color_mode="rgb",
interpolation="bilinear", alpha=0.2,
classes=None
):
"""Constructor for mixup image data generator.
Arguments:
generator (object): An instance of Keras ImageDataGenerator.
directory (str): Image directory.
batch_size (int): Batch size.
img_height (int): Image height in pixels.
img_width (int): Image width in pixels.
color_mode (string): Color mode of images.
interpolation (string): Interpolation method for resize.
            alpha (float): Mixup beta distribution alpha parameter. (default: 0.2)
classes (list): List of input classes
"""
self.batch_index = 0
self.batch_size = batch_size
self.alpha = alpha
# First iterator yielding tuples of (x, y)
self.generator = generator.flow_from_directory(
directory,
target_size=(img_height, img_width),
color_mode=color_mode,
batch_size=self.batch_size,
interpolation=interpolation,
shuffle=True,
class_mode='categorical',
classes=classes
)
# Number of images across all classes in image directory.
self.num_samples = self.generator.samples
self.class_list = classes
def reset_index(self):
"""Reset the generator indexes array."""
self.generator._set_index_array()
def on_epoch_end(self):
"""reset index on epoch end."""
self.reset_index()
def reset(self):
"""reset."""
self.batch_index = 0
def __len__(self):
"""length."""
return (self.num_samples + self.batch_size - 1) // self.batch_size
def get_steps_per_epoch(self):
"""Get number of steps per epoch."""
return self.num_samples // self.batch_size
def __next__(self):
"""Get next batch input/output pair.
Returns:
tuple -- batch of input/output pair, (inputs, outputs).
"""
if self.batch_index == 0:
self.reset_index()
current_index = (self.batch_index * self.batch_size) % self.num_samples
if self.num_samples > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
# random sample the lambda value from beta distribution.
if self.alpha > 0:
# Get a pair of inputs and outputs from the batch and its reversed batch.
X1, y1 = self.generator.next()
# in case the dataset has some garbage, the real batch size
# might be smaller than self.batch_size
_l = np.random.beta(self.alpha, self.alpha, X1.shape[0])
_l = np.maximum(_l, 1.0 - _l)
X_l = _l.reshape(X1.shape[0], 1, 1, 1)
y_l = _l.reshape(X1.shape[0], 1)
X2, y2 = np.flip(X1, 0), np.flip(y1, 0)
# Perform the mixup.
X = X1 * X_l + X2 * (1 - X_l)
y = y1 * y_l + y2 * (1 - y_l)
else:
# alpha == 0 essentially disable mixup
X, y = self.generator.next()
return X, y
def __iter__(self):
"""iterator."""
while True:
return next(self)
@property
def num_classes(self):
"""number of classes."""
return self.generator.num_classes
@property
def class_indices(self):
"""class indices."""
return self.generator.class_indices
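# Minimal construction sketch (directory, sizes and alpha below are illustrative
# assumptions, not values from this repository):
#   from keras.preprocessing.image import ImageDataGenerator
#   datagen = ImageDataGenerator(horizontal_flip=True)
#   mixup_iter = MixupImageDataGenerator(datagen, "/data/train", batch_size=64,
#                                        img_height=224, img_width=224, alpha=0.2)
#   x_batch, y_batch = next(mixup_iter)  # images and soft labels mixed with weight lambda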
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/makenet/utils/mixup_generator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Callbacks: utilities called at certain points during model training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
from keras.callbacks import ModelCheckpoint
from nvidia_tao_tf1.cv.common.utils import encode_from_keras
class AdvModelCheckpoint(ModelCheckpoint):
"""Save the encrypted model after every epoch.
Attributes:
ENC_KEY: API key to encrypt the model.
        epochs_since_last_save: Number of epochs since the model was last saved.
save_best_only: Flag to save model with best accuracy.
best: saved instance of best model.
verbose: Enable verbose messages.
"""
def __init__(self, filepath, key, monitor='val_loss', verbose=0,
save_best_only=False, save_weights_only=False,
mode='auto', period=1):
"""Initialization with encryption key."""
        super(AdvModelCheckpoint, self).__init__(
            filepath, monitor=monitor, verbose=verbose,
            save_best_only=save_best_only, save_weights_only=save_weights_only,
            mode=mode, period=period)
self._ENC_KEY = key
self.epochs_since_last_save = 0
def on_epoch_end(self, epoch, logs=None):
"""Called at the end of an epoch."""
logs = logs or {}
self.epochs_since_last_save += 1
if self.epochs_since_last_save >= self.period:
self.epochs_since_last_save = 0
filepath = self.filepath.format(epoch=epoch + 1, **logs)
if self.save_best_only:
current = logs.get(self.monitor)
if current is None:
warnings.warn('Can save best model only with %s available,'
' skipping.' % (self.monitor),
RuntimeWarning)
else:
if self.monitor_op(current, self.best):
if self.verbose > 0:
print('\nEpoch %05d: %s improved from %0.5f to '
'%0.5f, saving model to %s'
% (epoch + 1, self.monitor, self.best,
current, filepath))
self.best = current
if filepath.endswith(".hdf5"):
self.model.save(filepath, overwrite=True)
else:
encode_from_keras(self.model, filepath, self._ENC_KEY)
else:
if self.verbose > 0:
print('\nEpoch %05d: %s did not improve from %0.5f'
% (epoch + 1, self.monitor, self.best))
else:
if self.verbose > 0:
print('\nEpoch %05d: saving model to %s'
% (epoch + 1, filepath))
if str(filepath).endswith(".hdf5"):
self.model.save(str(filepath), overwrite=True)
else:
encode_from_keras(self.model, str(filepath), self._ENC_KEY)
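# Usage sketch (file name and key are illustrative assumptions): an .hdf5 path saves a
# plain Keras model, any other path is encrypted with the provided key.
#   checkpointer = AdvModelCheckpoint("weights/resnet_{epoch:03d}.hdf5",
#                                     key="<encryption_key>", verbose=1)
#   model.fit_generator(..., callbacks=[checkpointer])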
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/makenet/utils/callbacks.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of helper functions."""
import os
import cv2
import keras
from keras.utils.generic_utils import CustomObjectScope
from numba import jit, njit
import numpy as np
from PIL import Image
from nvidia_tao_tf1.cv.common.utils import (
CUSTOM_OBJS,
decode_to_keras,
MultiGPULearningRateScheduler,
restore_eff,
SoftStartCosineAnnealingScheduler,
StepLRScheduler
)
opt_dict = {
'sgd': keras.optimizers.SGD,
'adam': keras.optimizers.Adam,
'rmsprop': keras.optimizers.RMSprop
}
scope_dict = {'dense': keras.layers.Dense,
'conv2d': keras.layers.Conv2D}
regularizer_dict = {'l1': keras.regularizers.l1,
'l2': keras.regularizers.l2}
def build_optimizer(optimizer_config):
"""build optimizer with the optimizer config."""
if optimizer_config.WhichOneof("optim") == "sgd":
return opt_dict["sgd"](
lr=optimizer_config.sgd.lr,
momentum=optimizer_config.sgd.momentum,
decay=optimizer_config.sgd.decay,
nesterov=optimizer_config.sgd.nesterov
)
if optimizer_config.WhichOneof("optim") == "adam":
return opt_dict["adam"](
lr=optimizer_config.adam.lr,
beta_1=optimizer_config.adam.beta_1,
beta_2=optimizer_config.adam.beta_2,
epsilon=optimizer_config.adam.epsilon,
decay=optimizer_config.adam.decay
)
if optimizer_config.WhichOneof("optim") == "rmsprop":
return opt_dict["rmsprop"](
lr=optimizer_config.rmsprop.lr,
rho=optimizer_config.rmsprop.rho,
epsilon=optimizer_config.rmsprop.epsilon,
decay=optimizer_config.rmsprop.decay
)
raise ValueError("Unsupported Optimizer: {}".format(optimizer_config.WhichOneof("optim")))
def build_lr_scheduler(lr_config, hvd_size, max_iterations):
"""Build a learning rate scheduler from config."""
# Set up the learning rate callback. It will modulate learning rate
# based on iteration progress to reach max_iterations.
if lr_config.WhichOneof("lr_scheduler") == 'step':
lrscheduler = StepLRScheduler(
base_lr=lr_config.step.learning_rate * hvd_size,
gamma=lr_config.step.gamma,
step_size=lr_config.step.step_size,
max_iterations=max_iterations
)
elif lr_config.WhichOneof("lr_scheduler") == 'soft_anneal':
lrscheduler = MultiGPULearningRateScheduler(
base_lr=lr_config.soft_anneal.learning_rate * hvd_size,
soft_start=lr_config.soft_anneal.soft_start,
annealing_points=lr_config.soft_anneal.annealing_points,
annealing_divider=lr_config.soft_anneal.annealing_divider,
max_iterations=max_iterations
)
elif lr_config.WhichOneof("lr_scheduler") == 'cosine':
lrscheduler = SoftStartCosineAnnealingScheduler(
base_lr=lr_config.cosine.learning_rate * hvd_size,
min_lr_ratio=lr_config.cosine.min_lr_ratio,
soft_start=lr_config.cosine.soft_start,
max_iterations=max_iterations
)
else:
raise ValueError(
"Only `step`, `cosine` and `soft_anneal` ",
"LR scheduler are supported."
)
return lrscheduler
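# Usage sketch (variables assumed from the training script): the returned callback is
# appended to the Keras callbacks list, and the base learning rate is scaled by the
# number of Horovod workers.
#   lr_callback = build_lr_scheduler(train_config.lr_config, hvd.size(),
#                                    max_iterations=steps_per_epoch * n_epochs)
#   callbacks.append(lr_callback)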
def get_input_shape(model):
"""Obtain input shape from a Keras model."""
data_format = model.layers[1].data_format
# Computing shape of input tensor
image_shape = model.layers[0].input_shape[1:4]
# Setting input shape
if data_format == "channels_first":
nchannels, image_height, image_width = image_shape[0:3]
else:
image_height, image_width, nchannels = image_shape[0:3]
return image_height, image_width, nchannels
def model_io(model_path, enc_key=None, custom_objs=None):
"""Simple utility to handle model file based on file extensions.
Args:
        model_path (str): Path to the model file.
        enc_key (str): Key to load a .tlt file.
        custom_objs (dict): Custom objects for serialization and deserialization.
Returns:
model (keras.models.Model): Loaded keras model.
"""
assert os.path.exists(model_path), "Pretrained model not found at {}".format(model_path)
if model_path.endswith('.tlt'):
assert enc_key is not None, "Key must be provided to load the model."
return decode_to_keras(str(model_path), enc_key.encode(), custom_objects=custom_objs)
if model_path.endswith('.hdf5'):
return keras.models.load_model(str(model_path), compile=False)
raise NotImplementedError("Invalid model file extension. {}".format(model_path))
@njit
def randu(low, high):
"""standard uniform distribution."""
return np.random.random()*(high-low) + low
@jit
def random_hue(img, max_delta=10.0):
"""
Rotates the hue channel.
Args:
img: input image in float32
max_delta: Max number of degrees to rotate the hue channel
"""
# Rotates the hue channel by delta degrees
delta = randu(-max_delta, max_delta)
hsv = cv2.cvtColor(img.astype(np.float32), cv2.COLOR_BGR2HSV)
hchannel = hsv[:, :, 0]
hchannel = delta + hchannel
# hue should always be within [0,360]
idx = np.where(hchannel > 360)
hchannel[idx] = hchannel[idx] - 360
idx = np.where(hchannel < 0)
hchannel[idx] = hchannel[idx] + 360
hsv[:, :, 0] = hchannel
return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
@jit
def random_saturation(img, max_shift):
"""random saturation data augmentation."""
hsv = cv2.cvtColor(img.astype(np.float32), cv2.COLOR_BGR2HSV)
shift = randu(-max_shift, max_shift)
# saturation should always be within [0,1.0]
hsv[:, :, 1] = np.clip(hsv[:, :, 1]+shift, 0.0, 1.0)
return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
@jit
def random_contrast(img, center, max_contrast_scale):
"""random contrast data augmentation."""
new_img = (img-center)*(1.0 + randu(-max_contrast_scale, max_contrast_scale)) \
+ center
new_img = np.clip(new_img, 0., 1.)
return new_img
@jit
def random_shift(x_img, shift_stddev):
"""random shift data augmentation."""
shift = np.random.randn()*shift_stddev
new_img = np.clip(x_img + shift, 0.0, 1.0)
return new_img
def color_augmentation(
x_img,
color_shift_stddev=0.0,
hue_rotation_max=25.0,
saturation_shift_max=0.2,
contrast_center=0.5,
contrast_scale_max=0.1
):
"""color augmentation for images."""
# convert PIL Image to numpy array
x_img = np.array(x_img, dtype=np.float32)
# normalize the image to (0, 1)
x_img /= 255.0
x_img = random_shift(x_img, color_shift_stddev)
x_img = random_hue(x_img, max_delta=hue_rotation_max)
x_img = random_saturation(x_img, saturation_shift_max)
x_img = random_contrast(
x_img,
contrast_center,
contrast_scale_max
)
# convert back to PIL Image
x_img *= 255.0
return Image.fromarray(x_img.astype(np.uint8), "RGB")
def setup_config(model, reg_config, freeze_bn=False, bn_config=None, custom_objs=None):
"""Wrapper for setting up BN and regularizer.
Args:
model (keras Model): a Keras model
reg_config: reg_config from the Proto file
freeze_bn(bool): Flag to freeze BN layers in this model
bn_config(proto): config to override BatchNormalization parameters
        custom_objs (dict): Custom objects for serialization and deserialization.
    Return:
        A new model with overridden config.
"""
# set training=False for BN layers if freeze_bn=True
# otherwise the freeze_bn flag in model builder will be ineffective
def compose_call(prev_call_method):
def call(self, inputs, training=False):
return prev_call_method(self, inputs, training)
return call
prev_batchnorm_call = keras.layers.normalization.BatchNormalization.call
if freeze_bn:
keras.layers.normalization.BatchNormalization.call = compose_call(
prev_batchnorm_call
)
if bn_config is not None:
bn_momentum = bn_config.momentum
bn_epsilon = bn_config.epsilon
else:
bn_momentum = 0.9
bn_epsilon = 1e-5
# Obtain the current configuration from model
mconfig = model.get_config()
# Obtain type and scope of the regularizer
reg_type = reg_config.type.lower()
scope_list = reg_config.scope.split(',')
layer_list = [scope_dict[i.lower()] for i in scope_list if i.lower()
in scope_dict]
for layer, layer_config in zip(model.layers, mconfig['layers']):
# BN settings
if type(layer) == keras.layers.BatchNormalization:
layer_config['config']['momentum'] = bn_momentum
layer_config['config']['epsilon'] = bn_epsilon
# Regularizer settings
if reg_type:
if type(layer) in layer_list and \
hasattr(layer, 'kernel_regularizer'):
assert reg_type in ['l1', 'l2', 'none'], \
"Regularizer can only be either L1, L2 or None."
if reg_type in ['l1', 'l2']:
assert 0 < reg_config.weight_decay < 1, \
"Weight decay should be no less than 0 and less than 1"
regularizer = regularizer_dict[reg_type](
reg_config.weight_decay)
layer_config['config']['kernel_regularizer'] = \
{'class_name': regularizer.__class__.__name__,
'config': regularizer.get_config()}
if reg_type == 'none':
layer_config['config']['kernel_regularizer'] = None
if custom_objs:
CUSTOM_OBJS.update(custom_objs)
with CustomObjectScope(CUSTOM_OBJS):
updated_model = keras.models.Model.from_config(mconfig)
updated_model.set_weights(model.get_weights())
# restore the BN call method before return
if freeze_bn:
keras.layers.normalization.BatchNormalization.call = prev_batchnorm_call
return updated_model
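# Usage sketch (assumed spec objects): applies the kernel regularizer from reg_config
# to Conv2D/Dense layers and overrides BatchNormalization momentum/epsilon in one pass.
#   model = setup_config(model,
#                        experiment_spec.train_config.reg_config,
#                        freeze_bn=False,
#                        bn_config=experiment_spec.model_config.batch_norm_config)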
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/makenet/utils/helper.py |
"""Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/makenet/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export a classification model."""
# import build_command_line_parser as this is needed by entrypoint
from nvidia_tao_tf1.cv.common.export.app import build_command_line_parser as global_parser # noqa pylint: disable=W0611
from nvidia_tao_tf1.cv.common.export.app import run_export
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.makenet.export.classification_exporter import ClassificationExporter \
as Exporter
def build_command_line_parser(parser=None):
"""Simple function to build the command line parser."""
args_parser = global_parser(parser=parser)
args_parser.add_argument(
"--classmap_json",
help="UNIX path to classmap.json file generated during classification <train>",
default=None,
type=str,
)
return args_parser
def parse_command_line(args=None):
"""Parse command line arguments."""
parser = build_command_line_parser(parser=None)
return vars(parser.parse_known_args(args)[0])
def main(args=None):
"""Run export for classification."""
try:
args = parse_command_line(args=args)
# Forcing export to ONNX by default.
backend = 'onnx'
run_export(Exporter, args=args, backend=backend)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Export finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Export was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/makenet/scripts/export.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dump training samples for INT8 inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from functools import partial
import logging
import os
import sys
from keras.preprocessing.image import ImageDataGenerator
from tqdm import trange
from nvidia_tao_tf1.core.export import TensorFile
from nvidia_tao_tf1.cv.makenet.spec_handling.spec_loader import load_experiment_spec
from nvidia_tao_tf1.cv.makenet.utils import preprocess_crop # noqa pylint: disable=unused-import
from nvidia_tao_tf1.cv.makenet.utils.preprocess_input import preprocess_input
# Defining multiple image extensions.
SUPPORTED_IMAGE_EXTENSIONS = [".jpg", ".png", ".jpeg"]
logger = logging.getLogger(__name__)
def build_command_line_parser(parser=None):
"""Function to build command line parser."""
if parser is None:
parser = argparse.ArgumentParser(
description="Dump training samples for INT8")
parser.add_argument("-o", "--output", type=str, required=True,
help="Path to the output TensorFile", default=None)
parser.add_argument("-e", "--experiment_spec", type=str, required=True,
help="Path to the experiment spec file.", default=None)
parser.add_argument("-m", "--max_batches", type=int,
help="Number of batches", default=1)
parser.add_argument("-v", "--verbose", action='store_true',
help='Set the verbosity of the log.')
parser.add_argument("--use_validation_set", action='store_true',
help="Set to use only validation set.")
return parser
def parse_command_line(args=None):
"""Parse command line arguments for dumping image samples for INT8 calibration."""
parser = build_command_line_parser(parser=None)
arguments = vars(parser.parse_args(args))
return arguments
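# Example argument set for this tool (paths are placeholders): dump 10 preprocessed
# training batches into a TensorFile for later INT8 calibration.
#   -e /workspace/spec.cfg -o /workspace/cal.tensorfile -m 10 -v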
def dump_samples(output_path, config_file=None,
n_batches=1, verbosity=False,
use_validation_set=False):
"""Main wrapper function for dumping image samples.
Args:
output_path (str): Directory the output tensorfile should be written.
config_file (str): Path to experiment config file.
n_batches (int): Number of batches to be dumped.
verbosity (bool): Enable verbose logs.
use_validation_set (bool): Flag to use training or validation set.
"""
# Configure the logger.
logging.basicConfig(
format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level='DEBUG' if verbosity else 'INFO')
if config_file is not None:
# Create an experiment_pb2.Experiment object from the input file.
assert os.path.exists(config_file), "Config file not found at {}".format(config_file)
logger.info("Loading experiment spec at {}".format(config_file))
# The spec file in the config path has to be complete.
# Default spec is not merged into es.
es = load_experiment_spec(config_file, merge_from_default=False)
else:
logger.info("Loading the default experiment spec file.")
es = load_experiment_spec()
# Extract the training config.
train_config = es.train_config
model_config = es.model_config
# Define data dimensions.
image_shape = model_config.input_image_size.split(',')
n_channel = int(image_shape[0])
image_height = int(image_shape[1])
image_width = int(image_shape[2])
assert n_channel in [1, 3], "Invalid input image dimension."
assert image_height >= 16, "Image height should be greater than 15 pixels."
assert image_width >= 16, "Image width should be greater than 15 pixels."
img_mean = es.train_config.image_mean
if n_channel == 3:
if img_mean:
            assert all(c in img_mean for c in ['r', 'g', 'b']), (
"'r', 'g', 'b' should all be present in image_mean "
"for images with 3 channels."
)
img_mean = [img_mean['b'], img_mean['g'], img_mean['r']]
else:
img_mean = [103.939, 116.779, 123.68]
else:
if img_mean:
assert 'l' in img_mean, (
"'l' should be present in image_mean for images "
"with 1 channel."
)
img_mean = [img_mean['l']]
else:
img_mean = [117.3786]
# Define path to dataset.
train_data = train_config.train_dataset_path
if use_validation_set:
train_data = train_config.val_dataset_path
batch_size = train_config.batch_size_per_gpu
# Setting dataloader color_mode.
color_mode = "rgb"
if n_channel == 1:
color_mode = "grayscale"
preprocessing_func = partial(
preprocess_input,
data_format="channels_first",
mode=train_config.preprocess_mode,
color_mode=color_mode,
img_mean=img_mean)
# Initialize the data generator.
logger.info("Setting up input generator.")
train_datagen = ImageDataGenerator(preprocessing_function=preprocessing_func,
horizontal_flip=False,
featurewise_center=False)
logger.debug("Setting up iterator.")
train_iterator = train_datagen.flow_from_directory(train_data,
target_size=(image_height,
image_width),
batch_size=batch_size,
class_mode='categorical',
color_mode=color_mode)
# Total_num_samples.
num_samples = train_iterator.n
num_avail_batches = int(num_samples / batch_size)
assert n_batches <= num_avail_batches, "n_batches <= num_available_batches, n_batches={}, " \
"num_available_batches={}".format(n_batches,
num_avail_batches)
# Make the output directory.
dir_path = os.path.dirname(output_path)
if not os.path.exists(dir_path):
logger.info("Output directory not found. Creating at {}".format(dir_path))
os.makedirs(dir_path)
# Write data per batch.
if os.path.exists(output_path):
raise ValueError("A previously generated tensorfile already exists in the output path. "
"Please delete this file before writing a new one.")
tensorfile = TensorFile(os.path.join(output_path), 'w')
# Setting tqdm iterator.
tr = trange(n_batches, file=sys.stdout)
tr.set_description("Writing calibration tensorfile")
for _ in tr:
image, _ = next(train_iterator)
tensorfile.write(image)
tensorfile.close()
logger.info("Calibration tensorfile written.")
def main(cl_args=None):
"""Main function for the trt calibration samples."""
args = parse_command_line(args=cl_args)
dump_samples(args["output"],
args["experiment_spec"],
args["max_batches"],
args['verbose'],
args['use_validation_set'])
if __name__ == '__main__':
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/makenet/scripts/calibration_tensorfile.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Makenet training script with protobuf configuration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from functools import partial
import json
import logging
import os
import sys
from keras import backend as K
from keras.preprocessing.image import ImageDataGenerator
from PIL import ImageFile
import six
import tensorflow as tf
from nvidia_tao_tf1.core.utils import set_random_seed
from nvidia_tao_tf1.cv.common.callbacks.loggers import TAOStatusLogger
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.mlops.clearml import get_clearml_task
from nvidia_tao_tf1.cv.common.model_parallelism.parallelize_model import model_parallelism
from nvidia_tao_tf1.cv.common.utils import check_tf_oom, hvd_keras, restore_eff
from nvidia_tao_tf1.cv.common.utils import OneIndexedCSVLogger as CSVLogger
from nvidia_tao_tf1.cv.common.utils import TensorBoard
from nvidia_tao_tf1.cv.makenet.model.model_builder import get_model
from nvidia_tao_tf1.cv.makenet.spec_handling.spec_loader import load_experiment_spec
from nvidia_tao_tf1.cv.makenet.utils import preprocess_crop # noqa pylint: disable=unused-import
from nvidia_tao_tf1.cv.makenet.utils.callbacks import AdvModelCheckpoint
from nvidia_tao_tf1.cv.makenet.utils.helper import (
build_lr_scheduler,
build_optimizer,
model_io,
setup_config
)
from nvidia_tao_tf1.cv.makenet.utils.mixup_generator import MixupImageDataGenerator
from nvidia_tao_tf1.cv.makenet.utils.preprocess_input import preprocess_input
ImageFile.LOAD_TRUNCATED_IMAGES = True
FLAGS = tf.app.flags.FLAGS
logger = logging.getLogger(__name__)
verbose = 0
hvd = None
def eval_str(s):
"""If s is a string, return the eval results. Else return itself."""
if isinstance(s, six.string_types):
if len(s) > 0:
return eval(s)
return None
return s
def build_command_line_parser(parser=None):
    '''Build the command line parser for training.'''
if parser is None:
parser = argparse.ArgumentParser(prog='train',
description='Train a classification model.')
parser.add_argument(
'-e',
'--experiment_spec_file',
type=str,
required=True,
help='Path to the experiment spec file.')
parser.add_argument(
'-r',
'--results_dir',
required=True,
type=str,
help='Path to a folder where experiment outputs should be written.'
)
parser.add_argument(
'-k',
'--key',
required=False,
default="",
type=str,
help='Key to save or load a .tlt model.'
)
parser.add_argument(
'--init_epoch',
default=1,
type=int,
help='Set resume epoch.'
)
parser.add_argument(
'-v',
'--verbose',
action='store_true',
help='Include this flag in command line invocation for verbose logs.'
)
parser.add_argument(
'-c',
'--classmap',
help="Class map file to set the class indices of the model.",
type=str,
default=None
)
return parser
def parse_command_line(args=None):
"""Simple function to parse command line arguments."""
    parser = build_command_line_parser(parser=None)
return parser.parse_args(args)
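# Example argument set parsed by the parser above (paths and key are placeholders):
#   -e /workspace/classification_spec.cfg -r /workspace/results -k <encryption_key> --init_epoch 1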
def setup_callbacks(model_name, results_dir, lr_config,
init_epoch, iters_per_epoch, max_epoch, key,
hvd, weight_histograms=False):
"""Setup callbacks: tensorboard, checkpointer, lrscheduler, csvlogger.
Args:
model_name (str): name of the model used.
results_dir (str): Path to a folder where various training outputs will
be written.
lr_config: config derived from the Proto config file
        init_epoch (int): The epoch number to resume training from.
        iters_per_epoch (int): Number of iterations (steps) per epoch.
        max_epoch (int): Total number of training epochs.
        key (str): Encryption key for saving .tlt checkpoints.
        hvd: Horovod instance.
        weight_histograms (bool): Enable weight histograms in the tensorboard callback.
Returns:
callbacks (list of keras.callbacks): list of callbacks.
"""
callbacks = [hvd.callbacks.BroadcastGlobalVariablesCallback(0),
hvd.callbacks.MetricAverageCallback()]
max_iterations = iters_per_epoch * max_epoch
lrscheduler = build_lr_scheduler(lr_config, hvd.size(), max_iterations)
init_step = (init_epoch - 1) * iters_per_epoch
lrscheduler.reset(init_step)
callbacks.append(lrscheduler)
if hvd.rank() == 0:
# Set up the checkpointer.
save_weights_dir = os.path.join(results_dir, 'weights')
if not os.path.exists(save_weights_dir):
os.makedirs(save_weights_dir)
# Save encrypted models
weight_filename = os.path.join(save_weights_dir,
'%s_{epoch:03d}.hdf5' % model_name)
checkpointer = AdvModelCheckpoint(weight_filename, key, verbose=1)
callbacks.append(checkpointer)
# Set up the custom TensorBoard callback. It will log the loss
# after every step, and some images and user-set summaries only on
# the first step of every epoch. Align this with other keras
# networks.
tensorboard_dir = os.path.join(results_dir, "events")
if not os.path.exists(tensorboard_dir):
os.makedirs(tensorboard_dir)
tensorboard = TensorBoard(
log_dir=tensorboard_dir,
weight_hist=weight_histograms
)
callbacks.append(tensorboard)
# Set up the CSV logger, logging statistics after every epoch.
csvfilename = os.path.join(results_dir, 'training.csv')
csvlogger = CSVLogger(csvfilename,
separator=',',
append=False)
callbacks.append(csvlogger)
status_logger = TAOStatusLogger(
results_dir,
append=True,
num_epochs=max_epoch,
is_master=hvd.rank() == 0,
)
callbacks.append(status_logger)
return callbacks
def verify_dataset_classes(dataset_path, classes):
"""Verify whether classes are in the dataset.
Args:
dataset_path (str): Path to the dataset.
classes (list): List of classes.
Returns:
No explicit returns.
"""
dataset_classlist = [
item for item in os.listdir(dataset_path)
if os.path.isdir(os.path.join(dataset_path, item))
]
missed_classes = []
for class_name in classes:
if class_name not in dataset_classlist:
missed_classes.append(class_name)
assert not missed_classes, (
"Some classes mentioned in the classmap file were "
f"missing in the dataset at {dataset_path}. "
f"\n Missed classes are {missed_classes}"
)
def categorical_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0):
"""Ported tf.keras categorical_crossentropy."""
y_pred = K.constant(y_pred) if not K.is_tensor(y_pred) else y_pred
y_true = K.cast(y_true, y_pred.dtype)
    smoothing = K.cast_to_floatx(label_smoothing)
    def _smooth_labels():
        num_classes = K.cast(K.shape(y_true)[1], y_pred.dtype)
        return y_true * (1.0 - smoothing) + (smoothing / num_classes)
    # Apply smoothing only when a non-zero label_smoothing factor is configured.
    y_true = K.switch(K.greater(smoothing, 0), _smooth_labels, lambda: y_true)
return K.categorical_crossentropy(y_true, y_pred, from_logits=from_logits)
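# Worked example of the smoothing above (illustrative numbers): with label_smoothing=0.1
# and 3 classes, a one-hot target [1, 0, 0] becomes
# [1*(1-0.1)+0.1/3, 0+0.1/3, 0+0.1/3] ~= [0.9333, 0.0333, 0.0333].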
def load_data(train_data, val_data, preprocessing_func,
image_height, image_width, batch_size,
enable_random_crop=False, enable_center_crop=False,
enable_color_augmentation=False,
interpolation=0, color_mode="rgb",
mixup_alpha=0.0, no_horizontal_flip=False,
classmap=None):
"""Load training and validation data with default data augmentation.
Args:
train_data (str): path to the training data.
val_data (str): path to the validation data.
preprocessing_func: function to process an image.
image_height (int): Height of the input image tensor.
image_width (int): Width of the input image tensor.
batch_size (int): Number of image tensors per batch.
enable_random_crop (bool): Flag to enable random cropping in load_img.
enable_center_crop (bool): Flag to enable center cropping for val.
enable_color_augmentation(bool): Flag to enable color augmentation.
interpolation(int): Interpolation method for image resize. 0 means bilinear,
while 1 means bicubic.
color_mode (str): Input image read mode as either `rgb` or `grayscale`.
mixup_alpha (float): mixup alpha.
no_horizontal_flip(bool): Flag to disable horizontal flip for
direction-aware datasets.
classmap (str): Path to classmap file.
Return:
train/val Iterators and number of classes in the dataset.
"""
interpolation_map = {
0: "bilinear",
1: "bicubic"
}
interpolation = interpolation_map[interpolation]
classes = None
if classmap is not None:
classes = get_classes_from_classmap(classmap)
verify_dataset_classes(train_data, classes)
verify_dataset_classes(val_data, classes)
# set color augmentation properly for train.
# this global var will not affect validation dataset because
# the crop method is either "none" or "center" for val dataset,
# while this color augmentation is only possible for "random" crop.
if enable_color_augmentation:
preprocess_crop._set_color_augmentation(True)
# Initializing data generator : Train
train_datagen = ImageDataGenerator(
preprocessing_function=preprocessing_func,
horizontal_flip=not no_horizontal_flip,
featurewise_center=False)
train_iterator = MixupImageDataGenerator(
train_datagen, train_data, batch_size,
image_height, image_width,
color_mode=color_mode,
interpolation=interpolation + ':random' if enable_random_crop else interpolation,
alpha=mixup_alpha,
classes=classes
)
logger.info('Processing dataset (train): {}'.format(train_data))
# Initializing data generator: Val
val_datagen = ImageDataGenerator(
preprocessing_function=preprocessing_func,
horizontal_flip=False)
# Initializing data iterator: Val
val_iterator = val_datagen.flow_from_directory(
val_data,
target_size=(image_height, image_width),
color_mode=color_mode,
batch_size=batch_size,
interpolation=interpolation + ':center' if enable_center_crop else interpolation,
shuffle=False,
classes=classes,
class_mode='categorical')
logger.info('Processing dataset (validation): {}'.format(val_data))
# Check if the number of classes is > 1
assert train_iterator.num_classes > 1, \
"Number of classes should be greater than 1. Consider adding a background class."
# Check if the number of classes is consistent
assert train_iterator.num_classes == val_iterator.num_classes, \
"Number of classes in train and val don't match."
return train_iterator, val_iterator, train_iterator.num_classes
def get_classes_from_classmap(classmap_path):
"""Get list of classes from classmap file.
Args:
classmap_path (str): Path to the classmap file.
Returns:
classes (list): List of classes
"""
if not os.path.exists(classmap_path):
raise FileNotFoundError(
f"Class map file wasn't found at {classmap_path}"
)
with open(classmap_path, "r") as cmap_file:
try:
data = json.load(cmap_file)
except json.decoder.JSONDecodeError as e:
print(f"Loading the {classmap_path} failed with error\n{e}")
sys.exit(-1)
        except Exception as e:
            # A generic exception has no subprocess-style `output` attribute to inspect.
            print(f"Loading the {classmap_path} failed with error\n{e}")
            sys.exit(-1)
if not data:
return []
classes = [""] * len(list(data.keys()))
if not all([isinstance(value, int) for value in data.values()]):
raise RuntimeError(
"The values in the classmap file should be int."
"Please verify the contents of the classmap file."
)
if not all([class_index < len(classes)
and isinstance(class_index, int)
for class_index in data.values()]):
raise RuntimeError(
"Invalid data in the json file. The class index must "
"be < number of classes and an integer value.")
for classname, class_index in data.items():
classes[class_index] = classname
return classes
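# Example (hypothetical file contents): a classmap.json of {"cat": 0, "dog": 1, "bird": 2}
# yields classes == ["cat", "dog", "bird"], i.e. class names ordered by their index.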
def run_experiment(config_path=None, results_dir=None,
key=None, init_epoch=1, verbosity=False,
classmap=None):
"""Launch experiment that trains the model.
NOTE: Do not change the argument names without verifying that cluster
submission works.
Args:
config_path (str): Path to a text file containing a complete experiment
configuration.
        results_dir (str): Path to a folder where various training outputs will
            be written. If the folder does not already exist, it will be created.
        key (str): Encryption key for loading/saving .tlt models.
        init_epoch (int): The epoch number to resume training from.
        verbosity (bool): Flag to enable verbose logging.
        classmap (str): Path to the classmap file.
"""
# Horovod: initialize Horovod.
hvd = hvd_keras()
hvd.init()
# Load experiment spec.
if config_path is not None:
# Create an experiment_pb2.Experiment object from the input file.
logger.info("Loading experiment spec at %s.", config_path)
# The spec in config_path has to be complete.
# Default spec is not merged into es.
es = load_experiment_spec(config_path, merge_from_default=False)
else:
logger.info("Loading the default experiment spec.")
es = load_experiment_spec()
model_config = es.model_config
train_config = es.train_config
# Horovod: pin GPU to be used to process local rank (one GPU per process)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# check if model parallelism is enabled or not
if train_config.model_parallelism:
world_size = len(train_config.model_parallelism)
else:
world_size = 1
gpus = list(range(hvd.local_rank() * world_size, (hvd.local_rank() + 1) * world_size))
config.gpu_options.visible_device_list = ','.join([str(x) for x in gpus])
K.set_session(tf.Session(config=config))
verbose = 1 if hvd.rank() == 0 else 0
K.set_image_data_format('channels_first')
# Configure the logger.
logging.basicConfig(
format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level='DEBUG' if verbosity else 'INFO')
# Set random seed.
logger.debug("Random seed is set to {}".format(train_config.random_seed))
set_random_seed(train_config.random_seed + hvd.local_rank())
is_master = hvd.rank() == 0
if is_master and not os.path.exists(results_dir):
os.makedirs(results_dir)
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=is_master,
verbosity=logger.getEffectiveLevel(),
append=True
)
)
# Configure tf logger verbosity.
tf.logging.set_verbosity(tf.logging.INFO)
weight_histograms = False
if hvd.rank() == 0:
if train_config.HasField("visualizer"):
weight_histograms = train_config.visualizer.weight_histograms
if train_config.visualizer.HasField("clearml_config"):
clearml_config = train_config.visualizer.clearml_config
_ = get_clearml_task(clearml_config, "classification")
# get channel, height and width of the input image
nchannels, image_height, image_width = map(int, model_config.input_image_size.split(','))
assert nchannels in [1, 3], "Invalid input image dimension."
assert image_height >= 16, "Image height should be greater than 15 pixels."
assert image_width >= 16, "Image width should be greater than 15 pixels."
if nchannels == 3:
color_mode = "rgb"
else:
color_mode = "grayscale"
img_mean = train_config.image_mean
if train_config.preprocess_mode in ['tf', 'torch'] and img_mean:
logger.info("Custom image mean is only supported in `caffe` mode.")
logger.info("Custom image mean will be ignored.")
if train_config.preprocess_mode == 'caffe':
mode_txt = 'Custom'
if nchannels == 3:
if img_mean:
                assert all(c in img_mean for c in ['r', 'g', 'b']), (
"'r', 'g', 'b' should all be present in image_mean "
"for images with 3 channels."
)
img_mean = [img_mean['b'], img_mean['g'], img_mean['r']]
else:
mode_txt = 'Default'
img_mean = [103.939, 116.779, 123.68]
else:
if img_mean:
assert 'l' in img_mean, (
"'l' should be present in image_mean for images "
"with 1 channel."
)
img_mean = [img_mean['l']]
else:
mode_txt = 'Default'
img_mean = [117.3786]
logger.info("{} image mean {} will be used.".format(mode_txt, img_mean))
# Load augmented data
train_iterator, val_iterator, nclasses = \
load_data(train_config.train_dataset_path,
train_config.val_dataset_path,
partial(preprocess_input,
data_format='channels_first',
mode=train_config.preprocess_mode,
img_mean=img_mean,
color_mode=color_mode),
image_height, image_width,
train_config.batch_size_per_gpu,
train_config.enable_random_crop,
train_config.enable_center_crop,
train_config.enable_color_augmentation,
model_config.resize_interpolation_method,
color_mode=color_mode,
mixup_alpha=train_config.mixup_alpha,
no_horizontal_flip=train_config.disable_horizontal_flip,
classmap=classmap)
# Creating model
ka = dict()
ka['nlayers'] = model_config.n_layers if model_config.n_layers else 18
ka['use_batch_norm'] = model_config.use_batch_norm
ka['use_pooling'] = model_config.use_pooling
ka['freeze_bn'] = model_config.freeze_bn
ka['use_bias'] = model_config.use_bias
ka['all_projections'] = model_config.all_projections
ka['dropout'] = model_config.dropout if model_config.dropout else 0.0
ka['activation'] = model_config.activation
freeze_blocks = model_config.freeze_blocks if model_config.freeze_blocks else None
ka['passphrase'] = key
final_model = get_model(arch=model_config.arch if model_config.arch else "resnet",
input_shape=(nchannels, image_height, image_width),
data_format='channels_first',
nclasses=nclasses,
retain_head=model_config.retain_head,
freeze_blocks=freeze_blocks,
**ka)
# Set up BN and regularizer config
if model_config.HasField("batch_norm_config"):
bn_config = model_config.batch_norm_config
else:
bn_config = None
final_model = setup_config(
final_model,
train_config.reg_config,
freeze_bn=model_config.freeze_bn,
bn_config=bn_config,
custom_objs={}
)
if train_config.pretrained_model_path:
# Decrypt and load pretrained model
pretrained_model = model_io(train_config.pretrained_model_path, enc_key=key)
strict_mode = True
for layer in pretrained_model.layers[1:]:
            # Layers up to the final 'predictions' head must match exactly (strict mode).
if layer.name == 'predictions':
strict_mode = False
try:
l_return = final_model.get_layer(layer.name)
except ValueError:
# Some layers are not there
continue
try:
l_return.set_weights(layer.get_weights())
except ValueError:
if strict_mode:
# This is a pruned model
final_model = setup_config(
pretrained_model,
train_config.reg_config,
bn_config=bn_config
)
    # Model parallelism: keep the freeze_bn config untouched when building
    # a new parallelized model.
if train_config.model_parallelism:
final_model = model_parallelism(
final_model,
tuple(train_config.model_parallelism),
model_config.freeze_bn
)
# Printing model summary
final_model.summary()
if init_epoch > 1 and not train_config.pretrained_model_path:
raise ValueError("Make sure to load the correct model when setting initial epoch > 1.")
if train_config.pretrained_model_path and init_epoch > 1:
opt = pretrained_model.optimizer
else:
# Defining optimizer
opt = build_optimizer(train_config.optimizer)
# Add Horovod Distributed Optimizer
opt = hvd.DistributedOptimizer(opt)
# Compiling model
cc = partial(categorical_crossentropy, label_smoothing=train_config.label_smoothing)
cc.__name__ = "categorical_crossentropy"
final_model.compile(loss=cc, metrics=['accuracy'],
optimizer=opt)
callbacks = setup_callbacks(model_config.arch, results_dir,
train_config.lr_config,
init_epoch, len(train_iterator) // hvd.size(),
train_config.n_epochs, key,
hvd, weight_histograms=weight_histograms)
# Writing out class-map file for inference mapping
if hvd.rank() == 0:
with open(os.path.join(results_dir, "classmap.json"), "w") \
as classdump:
json.dump(train_iterator.class_indices, classdump)
# Commencing Training
final_model.fit_generator(
train_iterator,
steps_per_epoch=len(train_iterator) // hvd.size(),
epochs=train_config.n_epochs,
verbose=verbose,
workers=train_config.n_workers,
validation_data=val_iterator,
validation_steps=len(val_iterator),
callbacks=callbacks,
initial_epoch=init_epoch - 1)
# Evaluate the model on the full data set.
status_logging.get_status_logger().write(message="Final model evaluation in progress.")
score = hvd.allreduce(
final_model.evaluate_generator(val_iterator,
len(val_iterator),
workers=train_config.n_workers))
kpi_data = {
"validation_loss": float(score[0]),
"validation_accuracy": float(score[1])
}
status_logging.get_status_logger().kpi = kpi_data
status_logging.get_status_logger().write(message="Model evaluation is complete.")
if verbose:
logger.info('Total Val Loss: {}'.format(score[0]))
logger.info('Total Val accuracy: {}'.format(score[1]))
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Training finished successfully."
)
@check_tf_oom
def main(args=None):
"""Wrapper function for continuous training of MakeNet application.
Args:
        args: Dictionary of arguments containing parameters defined on the
            command line.
"""
# parse command line
args = parse_command_line(args)
try:
run_experiment(config_path=args.experiment_spec_file,
results_dir=args.results_dir,
key=args.key,
init_epoch=args.init_epoch,
verbosity=args.verbose,
classmap=args.classmap)
logger.info("Training finished successfully.")
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Training was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
logger.info("Training was interrupted.")
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == '__main__':
main()
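# Illustrative sketch of driving training directly through run_experiment; the
# spec path, results directory and key below are hypothetical placeholders:
#
#   run_experiment(config_path="/workspace/specs/classification_spec.txt",
#                  results_dir="/workspace/results",
#                  key="nvidia_tlt",
#                  init_epoch=1,
#                  verbosity=False,
#                  classmap=None)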
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/makenet/scripts/train.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to prune the classification TLT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.magnet_prune import ( # noqa pylint: disable=unused-import
build_command_line_parser,
main,
)
if __name__ == "__main__":
try:
main(sys.argv[1:])
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Pruning finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Pruning was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/makenet/scripts/prune.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference and metrics computation code using a loaded model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import logging
import os
import numpy as np
import pandas as pd
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.utils import check_tf_oom, restore_eff
from nvidia_tao_tf1.cv.makenet.spec_handling.spec_loader import load_experiment_spec
from nvidia_tao_tf1.cv.makenet.utils.helper import get_input_shape, model_io, setup_config
from nvidia_tao_tf1.cv.makenet.utils.preprocess_crop import load_and_crop_img
from nvidia_tao_tf1.cv.makenet.utils.preprocess_input import preprocess_input
logger = logging.getLogger(__name__)
VALID_IMAGE_EXT = ['.jpg', '.jpeg', '.png']
def build_command_line_parser(parser=None):
'''Parse command line arguments.'''
if parser is None:
parser = argparse.ArgumentParser(
description="Standalone classification inference tool")
parser.add_argument('-m', '--model_path',
type=str,
help="Path to the pretrained model (.tlt).",
required=True)
parser.add_argument('-k',
'--key',
required=False,
default="",
type=str,
help='Key to load a .tlt model.')
parser.add_argument('-i', '--image',
type=str,
help="Path to the inference image.")
parser.add_argument('-d', '--image_dir',
type=str,
help="Path to the inference image directory.")
parser.add_argument('-e',
'--experiment_spec',
required=True,
type=str,
help='Path to the experiment spec file.')
parser.add_argument('-b', '--batch_size',
type=int,
default=1,
help="Inference batch size.")
parser.add_argument('-cm', '--classmap',
type=str,
help="Path to the classmap file generated from training.",
default=None,
required=True)
parser.add_argument('-v', '--verbose',
action='store_true',
                        help='Include this flag in command line invocation '
                             'for verbose logs.')
parser.add_argument('-r',
"--results_dir",
type=str,
default=None,
help=argparse.SUPPRESS)
return parser
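# Illustrative command line built from the flags defined above (all paths are
# hypothetical placeholders):
#
#   python inference.py -m /workspace/model.tlt -k nvidia_tlt \
#       -e /workspace/spec.txt -cm /workspace/classmap.json \
#       -d /workspace/test_images -b 16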
def parse_command_line(args=None):
"""Simple function to parse command line arguments."""
parser = build_command_line_parser(args)
return parser.parse_args(args)
def batch_generator(iterable, batch_size=1):
"""Load a list of image paths in batches.
Args:
iterable: a list of image paths
        batch_size: number of image paths yielded per batch
"""
total_len = len(iterable)
for ndx in range(0, total_len, batch_size):
yield iterable[ndx:min(ndx + batch_size, total_len)]
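# Illustrative sketch of batch_generator usage (the image paths below are
# hypothetical placeholders):
#
#   paths = ["/data/0.jpg", "/data/1.jpg", "/data/2.jpg"]
#   for batch in batch_generator(paths, batch_size=2):
#       print(batch)  # ["/data/0.jpg", "/data/1.jpg"], then ["/data/2.jpg"]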
def preprocess(imgpath, image_height,
image_width, nchannels=3,
mode='caffe',
img_mean=None,
interpolation='nearest',
data_format='channels_first'):
"""Preprocess a single image.
    It includes resizing and normalization based on ImageNet statistics.
"""
# Open image and preprocessing
color_mode = 'rgb' if nchannels == 3 else 'grayscale'
image = load_and_crop_img(
imgpath,
grayscale=False,
color_mode=color_mode,
target_size=(image_height, image_width),
interpolation=interpolation,
)
image = np.array(image).astype(np.float32)
return preprocess_input(image.transpose((2, 0, 1)),
mode=mode, color_mode=color_mode,
img_mean=img_mean,
data_format=data_format)
def load_image_batch(batch, image_height,
image_width, nchannels=3,
mode='caffe',
img_mean=None,
interpolation='nearest',
data_format='channels_first'):
"""Group the preprocessed images in a batch."""
ph = np.zeros(
(len(batch), nchannels, image_height, image_width),
dtype=np.float32)
for i, imgpath in enumerate(batch):
ph[i, :, :, :] = preprocess(imgpath, image_height, image_width,
nchannels=nchannels, mode=mode,
img_mean=img_mean,
interpolation=interpolation,
data_format=data_format)
return ph
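# Illustrative sketch (file names and target size are hypothetical): building an
# (N, 3, 224, 224) float32 batch in 'caffe' mode with the default BGR channel means.
#
#   batch = load_image_batch(["/data/a.jpg", "/data/b.jpg"],
#                            image_height=224, image_width=224,
#                            nchannels=3, mode='caffe',
#                            img_mean=[103.939, 116.779, 123.68])
#   assert batch.shape == (2, 3, 224, 224)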
def inference(args=None):
"""Inference on an image/directory using a pretrained model file.
Args:
args: Dictionary arguments containing parameters defined by command
line parameters.
Log:
Image Mode:
print classifier output
Directory Mode:
write out a .csv file to store all the predictions
"""
# Set up status logging
if args.results_dir:
if not os.path.exists(args.results_dir):
os.makedirs(args.results_dir)
status_file = os.path.join(args.results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
verbosity=1,
append=True
)
)
s_logger = status_logging.get_status_logger()
s_logger.write(
status_level=status_logging.Status.STARTED,
message="Starting inference."
)
# Set up logger verbosity.
verbosity = 'INFO'
if args.verbose:
verbosity = 'DEBUG'
if not (args.image or args.image_dir):
s_logger.write(
status_level=status_logging.Status.FAILURE,
message="Provide either image file or a directory of images."
)
return
# Configure the logger.
logging.basicConfig(
format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level=verbosity)
# Load experiment spec.
if args.experiment_spec is not None:
# Create an experiment_pb2.Experiment object from the input file.
logger.info("Loading experiment spec at %s.", args.experiment_spec)
# The spec in config_path has to be complete.
# Default spec is not merged into es.
es = load_experiment_spec(args.experiment_spec,
merge_from_default=False,
validation_schema="validation")
else:
logger.info("Loading the default experiment spec.")
es = load_experiment_spec(validation_schema="validation")
# override BN config
if es.model_config.HasField("batch_norm_config"):
bn_config = es.model_config.batch_norm_config
else:
bn_config = None
custom_objs = {}
# Decrypt and load the pretrained model
model = model_io(args.model_path, enc_key=args.key, custom_objs=custom_objs)
# reg_config and freeze_bn are actually not useful, just use bn_config
# so the BN layer's output produces correct result.
# of course, only the BN epsilon matters in evaluation.
model = setup_config(
model,
es.train_config.reg_config,
freeze_bn=es.model_config.freeze_bn,
bn_config=bn_config,
custom_objs=custom_objs
)
# Printing summary of retrieved model
model.summary()
# Get input shape
image_height, image_width, nchannels = get_input_shape(model)
with open(args.classmap, "r") as cm:
class_dict = json.load(cm)
interpolation = es.model_config.resize_interpolation_method
interpolation_map = {
0: "bilinear",
1: "bicubic"
}
interpolation = interpolation_map[interpolation]
if es.eval_config.enable_center_crop:
interpolation += ":center"
img_mean = es.train_config.image_mean
if nchannels == 3:
if img_mean:
            assert all(c in img_mean for c in ['r', 'g', 'b']), (
"'r', 'g', 'b' should all be present in image_mean "
"for images with 3 channels."
)
img_mean = [img_mean['b'], img_mean['g'], img_mean['r']]
else:
img_mean = [103.939, 116.779, 123.68]
else:
if img_mean:
assert 'l' in img_mean, (
"'l' should be present in image_mean for images "
"with 1 channel."
)
img_mean = [img_mean['l']]
else:
img_mean = [117.3786]
if args.image:
logger.info("Processing {}...".format(args.image))
# Load and preprocess image
infer_input = preprocess(args.image, image_height, image_width,
nchannels=nchannels,
mode=es.train_config.preprocess_mode,
img_mean=img_mean,
interpolation=interpolation)
infer_input.shape = (1, ) + infer_input.shape
# Keras inference
raw_predictions = model.predict(infer_input, batch_size=1)
logger.debug("Raw prediction: \n{}".format(raw_predictions))
# Class output from softmax layer
class_index = np.argmax(raw_predictions)
print("Current predictions: {}".format(raw_predictions))
print("Class label = {}".format(class_index))
# Label Name
class_name = list(class_dict.keys())[list(class_dict.values()).index(class_index)]
print("Class name = {}".format(class_name))
if args.image_dir:
logger.info("Processing {}...".format(args.image_dir))
# Preparing list of inference files.
result_csv_path = os.path.join(args.image_dir, 'result.csv')
if args.results_dir:
result_csv_path = os.path.join(args.results_dir, 'result.csv')
csv_f = open(result_csv_path, 'w')
imgpath_list = [os.path.join(root, filename)
for root, subdirs, files in os.walk(args.image_dir)
for filename in files
if os.path.splitext(filename)[1].lower()
in VALID_IMAGE_EXT
]
if not imgpath_list:
s_logger.write(
status_level=status_logging.Status.FAILURE,
message="Image directory doesn't contain files with valid extensions" +
"Valid extensions are " + str(VALID_IMAGE_EXT)
)
return
# Generator in batch mode
for img_batch in batch_generator(imgpath_list, args.batch_size):
# Load images in batch
infer_batch = load_image_batch(img_batch,
image_height,
image_width,
nchannels=nchannels,
interpolation=interpolation,
img_mean=img_mean,
mode=es.train_config.preprocess_mode)
# Run inference
raw_predictions = model.predict(infer_batch,
batch_size=args.batch_size)
logger.debug("Raw prediction: \n{}".format(raw_predictions))
# Class output from softmax layer
class_indices = np.argmax(raw_predictions, axis=1)
# Map label index to label name
class_labels = map(lambda i: list(class_dict.keys())
[list(class_dict.values()).index(i)],
class_indices)
conf = np.max(raw_predictions, axis=1)
# Write predictions to file
df = pd.DataFrame(zip(list(img_batch), class_labels, conf))
df.to_csv(csv_f, header=False, index=False)
logger.info("Inference complete. Result is saved at {}".format(
result_csv_path))
if args.results_dir:
s_logger.write(
status_level=status_logging.Status.SUCCESS,
message="Inference finished successfully."
)
csv_f.close()
@check_tf_oom
def main(args=None):
"""Run inference on a single image or collection of images.
Args:
args: Dictionary arguments containing parameters defined by command
line parameters.
"""
try:
# parse command line
args = parse_command_line(args)
inference(args)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Inference convert was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/makenet/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perform Makenet Evaluation on IVA car make dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from functools import partial
import json
import logging
import os
import sys
import keras
from keras import backend as K
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
from PIL import ImageFile
from sklearn.metrics import classification_report, confusion_matrix
import tensorflow as tf
import nvidia_tao_tf1.cv.common.logging.logging as status_logging
from nvidia_tao_tf1.cv.common.utils import check_tf_oom, restore_eff
from nvidia_tao_tf1.cv.makenet.spec_handling.spec_loader import load_experiment_spec
from nvidia_tao_tf1.cv.makenet.utils.helper import get_input_shape, model_io, setup_config
from nvidia_tao_tf1.cv.makenet.utils import preprocess_crop # noqa pylint: disable=unused-import
from nvidia_tao_tf1.cv.makenet.utils.preprocess_input import preprocess_input
ImageFile.LOAD_TRUNCATED_IMAGES = True
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
K.set_session(tf.Session(config=config))
K.set_image_data_format('channels_first')
logger = logging.getLogger(__name__)
def build_command_line_parser(parser=None):
'''Parse command line arguments.'''
if parser is None:
parser = argparse.ArgumentParser(description='Evaluate a classification model.')
parser.add_argument(
'-e',
'--experiment_spec',
required=True,
type=str,
help='Path to the experiment spec file.'
)
parser.add_argument(
'-k',
'--key',
required=False,
default="",
type=str,
help='Key to load a .tlt model.'
)
parser.add_argument(
'-r',
"--results_dir",
type=str,
default=None,
help=argparse.SUPPRESS
)
parser.add_argument(
'-cm', '--classmap',
type=str,
help="Path to the classmap file.",
default=None
)
# Dummy args for deploy
parser.add_argument(
'-m',
'--model_path',
type=str,
required=False,
default=None,
help=argparse.SUPPRESS
)
parser.add_argument(
'-i',
'--image_dir',
type=str,
required=False,
default=None,
help=argparse.SUPPRESS
)
parser.add_argument(
'-l',
'--label_dir',
type=str,
required=False,
help=argparse.SUPPRESS
)
parser.add_argument(
'-b',
'--batch_size',
type=int,
required=False,
default=1,
help=argparse.SUPPRESS
)
return parser
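# Illustrative command line built from the flags defined above (paths are
# hypothetical placeholders; the model to evaluate is taken from eval_config
# in the spec, not from the command line):
#
#   python evaluate.py -e /workspace/spec.txt -k nvidia_tlt \
#       -cm /workspace/classmap.json -r /workspace/results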
def parse_command_line(args=None):
"""Simple function to parse command line arguments."""
parser = build_command_line_parser(args)
return parser.parse_args(args)
def run_evaluate(args=None):
"""Wrapper function to run evaluation of MakeNet model.
Args:
        args: Dictionary of arguments containing parameters parsed in the main function.
"""
# Set up status logging
if args.results_dir:
if not os.path.exists(args.results_dir):
os.makedirs(args.results_dir)
status_file = os.path.join(args.results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=True,
verbosity=1,
append=True
)
)
s_logger = status_logging.get_status_logger()
s_logger.write(
status_level=status_logging.Status.STARTED,
message="Starting evaluation."
)
# Set up logger verbosity.
verbosity = 'INFO'
# Configure the logger.
logging.basicConfig(
format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level=verbosity)
# Configure tf logger verbosity.
tf.logging.set_verbosity(tf.logging.INFO)
# Load experiment spec.
if args.experiment_spec is not None:
# Create an experiment_pb2.Experiment object from the input file.
logger.info("Loading experiment spec at %s.", args.experiment_spec)
# The spec in config_path has to be complete.
# Default spec is not merged into es.
es = load_experiment_spec(args.experiment_spec,
merge_from_default=False,
validation_schema="validation")
else:
logger.info("Loading the default experiment spec.")
es = load_experiment_spec(validation_schema="validation")
custom_objs = {}
# Decrypt and load the pretrained model
final_model = model_io(es.eval_config.model_path, enc_key=args.key, custom_objs=custom_objs)
# override BN config
if es.model_config.HasField("batch_norm_config"):
bn_config = es.model_config.batch_norm_config
else:
bn_config = None
# reg_config and freeze_bn are actually not useful, just use bn_config
# so the BN layer's output produces correct result.
# of course, only the BN epsilon matters in evaluation.
final_model = setup_config(
final_model,
es.train_config.reg_config,
freeze_bn=es.model_config.freeze_bn,
bn_config=bn_config,
custom_objs=custom_objs
)
# Defining optimizer
opt = keras.optimizers.SGD(lr=0, decay=1e-6, momentum=0.9, nesterov=False)
# Define precision/recall and F score metrics
topk_acc = partial(keras.metrics.top_k_categorical_accuracy,
k=es.eval_config.top_k)
topk_acc.__name__ = 'topk_acc'
# Compile model
final_model.compile(loss='categorical_crossentropy',
metrics=[topk_acc],
optimizer=opt)
# print model summary
final_model.summary()
# Get input shape
image_height, image_width, nchannels = get_input_shape(final_model)
assert nchannels in [1, 3], (
"Unsupported channel count {} for evaluation".format(nchannels)
)
color_mode = "rgb"
if nchannels == 1:
color_mode = "grayscale"
interpolation = es.model_config.resize_interpolation_method
interpolation_map = {
0: "bilinear",
1: "bicubic"
}
interpolation = interpolation_map[interpolation]
if es.eval_config.enable_center_crop:
interpolation += ":center"
img_mean = es.train_config.image_mean
if nchannels == 3:
if img_mean:
            assert all(c in img_mean for c in ['r', 'g', 'b']), (
"'r', 'g', 'b' should all be present in image_mean "
"for images with 3 channels."
)
img_mean = [img_mean['b'], img_mean['g'], img_mean['r']]
else:
img_mean = [103.939, 116.779, 123.68]
else:
if img_mean:
assert 'l' in img_mean, (
"'l' should be present in image_mean for images "
"with 1 channel."
)
img_mean = [img_mean['l']]
else:
img_mean = [117.3786]
# Initializing data generator
target_datagen = ImageDataGenerator(
preprocessing_function=partial(preprocess_input,
data_format='channels_first',
mode=es.train_config.preprocess_mode,
img_mean=img_mean,
color_mode=color_mode),
horizontal_flip=False)
if args.classmap:
# If classmap is provided, then we explicitly set it in ImageDataGenerator
with open(args.classmap, "r") as cmap_file:
try:
data = json.load(cmap_file)
except json.decoder.JSONDecodeError as e:
print(f"Loading the {args.classmap} failed with error\n{e}")
sys.exit(-1)
except Exception as e:
                if getattr(e, "output", None) is not None:
print(f"Evaluation failed with error {e.output}")
sys.exit(-1)
if not data:
class_names = None
else:
class_names = [""] * len(list(data.keys()))
if not all([class_index < len(class_names)
and isinstance(class_index, int)
for class_index in data.values()]):
raise RuntimeError(
"Invalid data in the json file. The class index must "
"be < number of classes and an integer value.")
for class_name, class_index in data.items():
class_names[class_index] = class_name
print("Class name = {}".format(class_names))
else:
class_names = None
# Initializing data iterator
target_iterator = target_datagen.flow_from_directory(
es.eval_config.eval_dataset_path,
target_size=(image_height, image_width),
color_mode=color_mode,
batch_size=es.eval_config.batch_size,
classes=class_names,
class_mode='categorical',
interpolation=interpolation,
shuffle=False)
logger.info('Processing dataset (evaluation): {}'.format(es.eval_config.eval_dataset_path))
nclasses = target_iterator.num_classes
assert nclasses > 1, "Invalid number of classes in the evaluation dataset."
# If number of classes does not match the new data
assert nclasses == final_model.output.get_shape().as_list()[-1], \
"The number of classes of the loaded model doesn't match the \
number of classes in the evaluation dataset."
# Evaluate the model on the full data set.
score = final_model.evaluate_generator(target_iterator,
len(target_iterator),
workers=es.eval_config.n_workers,
use_multiprocessing=False)
print('Evaluation Loss: {}'.format(score[0]))
print('Evaluation Top K accuracy: {}'.format(score[1]))
# Re-initializing data iterator
target_iterator = target_datagen.flow_from_directory(
es.eval_config.eval_dataset_path,
target_size=(image_height, image_width),
batch_size=es.eval_config.batch_size,
color_mode=color_mode,
class_mode='categorical',
interpolation=interpolation,
shuffle=False)
logger.info("Calculating per-class P/R and confusion matrix. It may take a while...")
Y_pred = final_model.predict_generator(target_iterator, len(target_iterator), workers=1)
y_pred = np.argmax(Y_pred, axis=1)
print('Confusion Matrix')
print(confusion_matrix(target_iterator.classes, y_pred))
print('Classification Report')
class_dict = target_iterator.class_indices
target_labels = [c[1] for c in sorted(class_dict.items(), key=lambda x:x[1])]
target_names = [c[0] for c in sorted(class_dict.items(), key=lambda x:x[1])]
print(classification_report(target_iterator.classes,
y_pred, labels=target_labels,
target_names=target_names))
if args.results_dir:
s_logger.kpi.update({'top_k_accuracy': float(score[1])})
s_logger.write(
status_level=status_logging.Status.SUCCESS,
message="Evaluation finished successfully."
)
@check_tf_oom
def main(args=None):
"""Wrapper function for evaluating MakeNet application.
Args:
args: Dictionary arguments containing parameters defined by command line
parameters.
"""
# parse command line
try:
args = parse_command_line(args)
run_evaluate(args)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Evaluation was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == '__main__':
main()
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/makenet/scripts/evaluate.py |
"""Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import keras
from keras.layers import BatchNormalization
import numpy as np
import pytest
from nvidia_tao_tf1.cv.makenet.model.model_builder import get_model
from nvidia_tao_tf1.cv.makenet.scripts.train import setup_config
class RegConfig():
"""Class for reg config."""
def __init__(self, reg_type, scope, weight_decay):
self.type = reg_type
self.scope = scope
self.weight_decay = weight_decay
bn_config = (False, True)
@pytest.mark.parametrize("freeze_bn",
bn_config)
def test_freeze_bn(freeze_bn):
keras.backend.clear_session()
model = get_model(
"vgg",
input_shape=(3, 224, 224),
data_format="channels_first",
freeze_bn=freeze_bn,
nlayers=16,
use_batch_norm=True,
use_pooling=False,
dropout=0.0,
use_bias=False,
)
reg_config = RegConfig("L2", "Conv2D,Dense", 1e-5)
model = setup_config(
model,
reg_config,
freeze_bn=freeze_bn
)
model.compile(
loss="mse",
metrics=['accuracy'],
optimizer="sgd"
)
if freeze_bn:
assert check_freeze_bn(model), (
"BN layers not frozen, expected frozen."
)
else:
assert not check_freeze_bn(model), (
"BN layers frozen, expected not frozen."
)
def check_freeze_bn(model):
"""Check if the BN layers in a model is frozen or not."""
bn_weights = []
for l in model.layers:
if type(l) == BatchNormalization:
# only check for moving mean and moving variance
bn_weights.append(l.get_weights()[2:])
rand_input = np.random.random((1, 3, 224, 224))
# do training several times
out_shape = model.outputs[0].get_shape()[1:]
out_label = np.random.random((1,) + out_shape)
model.train_on_batch(rand_input, out_label)
model.train_on_batch(rand_input, out_label)
model.train_on_batch(rand_input, out_label)
# do prediction several times
model.predict(rand_input)
model.predict(rand_input)
model.predict(rand_input)
# finally, check BN weights
new_bn_weights = []
for l in model.layers:
if type(l) == BatchNormalization:
# only check for moving mean and moving variance
new_bn_weights.append(l.get_weights()[2:])
# check the bn weights
for old_w, new_w in zip(bn_weights, new_bn_weights):
if not np.array_equal(old_w, new_w):
return False
return True
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/makenet/scripts/tests/test_freeze_bn.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""TLT command line wrapper to invoke CLI scripts."""
import sys
from nvidia_tao_tf1.cv.common.entrypoint.entrypoint import launch_job
import nvidia_tao_tf1.cv.makenet.scripts
def main():
"""Function to launch the job."""
launch_job(nvidia_tao_tf1.cv.makenet.scripts, "classification", sys.argv[1:])
if __name__ == "__main__":
main()
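# Illustrative usage once installed as the `classification` entrypoint. The
# sub-command names mirror the modules under nvidia_tao_tf1.cv.makenet.scripts
# (train, evaluate, inference, prune); the flags are whatever each script's own
# argument parser defines, e.g.:
#
#   classification evaluate -e <experiment_spec> -k <key> -cm <classmap.json>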
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/makenet/entrypoint/makenet.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""TLT MakeNet entrypoint."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/makenet/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IVA MakeNet model construction wrapper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import AveragePooling2D, Dense, Flatten
from keras.layers import Input
from keras.models import Model
from nvidia_tao_tf1.core.templates.alexnet import AlexNet
from nvidia_tao_tf1.core.templates.cspdarknet import CSPDarkNet
from nvidia_tao_tf1.core.templates.cspdarknet_tiny import CSPDarkNetTiny
from nvidia_tao_tf1.core.templates.darknet import DarkNet
from nvidia_tao_tf1.core.templates.efficientnet import (
EfficientNetB0,
EfficientNetB1,
EfficientNetB2,
EfficientNetB3,
EfficientNetB4,
EfficientNetB5,
EfficientNetB6,
EfficientNetB7
)
from nvidia_tao_tf1.core.templates.googlenet import GoogLeNet
from nvidia_tao_tf1.core.templates.mobilenet import MobileNet, MobileNetV2
from nvidia_tao_tf1.core.templates.resnet import ResNet
from nvidia_tao_tf1.core.templates.squeezenet import SqueezeNet
from nvidia_tao_tf1.core.templates.vgg import VggNet
from nvidia_tao_tf1.cv.makenet.utils.helper import model_io
SUPPORTED_ARCHS = [
"resnet", "vgg", "alexnet", "googlenet",
"mobilenet_v1", "mobilenet_v2", "squeezenet",
"darknet", "efficientnet_b0", "efficientnet_b1",
"efficientnet_b2", "efficientnet_b3",
"efficientnet_b4", "efficientnet_b5",
"efficientnet_b6", "efficientnet_b7",
"cspdarknet", "cspdarknet_tiny"
]
def add_dense_head(nclasses, base_model, data_format, kernel_regularizer, bias_regularizer):
"""Wrapper to add dense head to the backbone structure."""
output = base_model.output
output_shape = output.get_shape().as_list()
if data_format == 'channels_first':
pool_size = (output_shape[-2], output_shape[-1])
else:
pool_size = (output_shape[-3], output_shape[-2])
output = AveragePooling2D(pool_size=pool_size, name='avg_pool',
data_format=data_format, padding='valid')(output)
output = Flatten(name='flatten')(output)
output = Dense(nclasses, activation='softmax', name='predictions',
kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer)(output)
final_model = Model(inputs=base_model.input, outputs=output, name=base_model.name)
return final_model
def get_googlenet(input_shape=(3, 224, 224),
data_format='channels_first',
nclasses=1000,
kernel_regularizer=None,
bias_regularizer=None,
use_batch_norm=True,
retain_head=False,
use_bias=True,
freeze_bn=False,
freeze_blocks=None):
"""Wrapper to get GoogLeNet model from IVA templates."""
input_image = Input(shape=input_shape)
final_model = GoogLeNet(inputs=input_image,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_batch_norm=use_batch_norm,
activation_type='relu',
add_head=retain_head,
nclasses=nclasses,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
use_bias=use_bias)
if not retain_head:
final_model = add_dense_head(nclasses,
final_model,
data_format,
kernel_regularizer,
bias_regularizer)
return final_model
def get_alexnet(input_shape=(3, 224, 224),
data_format='channels_first',
nclasses=1000,
kernel_regularizer=None,
bias_regularizer=None,
retain_head=False,
freeze_blocks=None):
"""Wrapper to get AlexNet model from Maglev templates."""
final_model = AlexNet(input_shape=input_shape,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
add_head=retain_head,
nclasses=nclasses,
freeze_blocks=freeze_blocks)
if not retain_head:
final_model = add_dense_head(nclasses, final_model,
data_format, kernel_regularizer,
bias_regularizer)
return final_model
def get_resnet(nlayers=18,
input_shape=(3, 224, 224),
data_format='channels_first',
nclasses=1000,
kernel_regularizer=None,
bias_regularizer=None,
all_projections=True,
use_batch_norm=True,
use_pooling=False,
retain_head=False,
use_bias=True,
freeze_bn=False,
freeze_blocks=None):
"""Wrapper to get ResNet model from Maglev templates."""
input_image = Input(shape=input_shape)
final_model = ResNet(nlayers=nlayers,
input_tensor=input_image,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_batch_norm=use_batch_norm,
activation_type='relu',
all_projections=all_projections,
use_pooling=use_pooling,
add_head=retain_head,
nclasses=nclasses,
freeze_blocks=freeze_blocks,
freeze_bn=freeze_bn,
use_bias=use_bias)
if not retain_head:
final_model = add_dense_head(nclasses, final_model,
data_format, kernel_regularizer,
bias_regularizer)
return final_model
def get_darknet(nlayers=19,
input_shape=(3, 224, 224),
data_format='channels_first',
nclasses=1000,
alpha=0.1,
kernel_regularizer=None,
bias_regularizer=None,
use_batch_norm=True,
retain_head=False,
use_bias=False,
freeze_bn=False,
freeze_blocks=None):
"""Wrapper to get DarkNet model."""
input_image = Input(shape=input_shape)
final_model = DarkNet(nlayers=nlayers,
input_tensor=input_image,
data_format=data_format,
alpha=alpha,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_batch_norm=use_batch_norm,
add_head=retain_head,
nclasses=nclasses,
freeze_blocks=freeze_blocks,
freeze_bn=freeze_bn,
use_bias=use_bias)
if not retain_head:
final_model = add_dense_head(nclasses, final_model,
data_format, kernel_regularizer,
bias_regularizer)
return final_model
def get_cspdarknet(nlayers=19,
input_shape=(3, 224, 224),
data_format='channels_first',
nclasses=1000,
kernel_regularizer=None,
bias_regularizer=None,
use_batch_norm=True,
retain_head=False,
use_bias=False,
freeze_bn=False,
freeze_blocks=None,
activation="leaky_relu"):
"""Wrapper to get CSPDarkNet model."""
input_image = Input(shape=input_shape)
final_model = CSPDarkNet(nlayers=nlayers,
input_tensor=input_image,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_batch_norm=use_batch_norm,
add_head=retain_head,
nclasses=nclasses,
freeze_blocks=freeze_blocks,
freeze_bn=freeze_bn,
use_bias=use_bias,
activation=activation)
if not retain_head:
final_model = add_dense_head(nclasses, final_model,
data_format, kernel_regularizer,
bias_regularizer)
return final_model
def get_cspdarknet_tiny(
input_shape=(3, 224, 224),
data_format='channels_first',
nclasses=1000,
kernel_regularizer=None,
bias_regularizer=None,
use_batch_norm=True,
retain_head=False,
use_bias=False,
freeze_bn=False,
freeze_blocks=None,
activation="leaky_relu",
):
"""Wrapper to get CSPDarkNetTiny model."""
input_image = Input(shape=input_shape)
final_model = CSPDarkNetTiny(
input_tensor=input_image,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_batch_norm=use_batch_norm,
add_head=retain_head,
nclasses=nclasses,
freeze_blocks=freeze_blocks,
freeze_bn=freeze_bn,
use_bias=use_bias,
activation=activation
)
if not retain_head:
final_model = add_dense_head(nclasses, final_model,
data_format, kernel_regularizer,
bias_regularizer)
return final_model
def get_vgg(nlayers=16,
input_shape=(3, 224, 224),
data_format="channels_first",
nclasses=1000,
kernel_regularizer=None,
bias_regularizer=None,
use_batch_norm=True,
use_pooling=False,
retain_head=False,
use_bias=True,
freeze_bn=False,
freeze_blocks=None,
dropout=0.5):
"""Wrapper to get VGG model from IVA templates."""
input_image = Input(shape=input_shape)
final_model = VggNet(nlayers=nlayers,
inputs=input_image,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
use_batch_norm=use_batch_norm,
activation_type='relu',
use_pooling=use_pooling,
add_head=retain_head,
nclasses=nclasses,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
use_bias=use_bias,
dropout=dropout)
if not retain_head:
final_model = add_dense_head(nclasses, final_model,
data_format, kernel_regularizer,
bias_regularizer)
return final_model
def get_mobilenet(input_shape=None,
data_format='channels_first',
nclasses=1000,
use_batch_norm=None,
kernel_regularizer=None,
bias_regularizer=None,
retain_head=False,
use_bias=True,
freeze_bn=False,
freeze_blocks=None,
stride=32):
"""Wrapper to get MobileNet model from IVA templates."""
input_image = Input(shape=input_shape)
final_model = MobileNet(inputs=input_image,
input_shape=input_shape,
dropout=0.0,
add_head=retain_head,
stride=stride,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
nclasses=nclasses,
use_batch_norm=use_batch_norm,
use_bias=use_bias,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks)
if not retain_head:
final_model = add_dense_head(nclasses,
final_model,
data_format,
kernel_regularizer,
bias_regularizer)
return final_model
def get_mobilenet_v2(input_shape=None,
data_format='channels_first',
nclasses=1000,
use_batch_norm=None,
kernel_regularizer=None,
bias_regularizer=None,
retain_head=False,
all_projections=False,
use_bias=True,
freeze_bn=False,
freeze_blocks=None,
stride=32):
"""Wrapper to get MobileNet V2 model from IVA templates."""
input_image = Input(shape=input_shape)
final_model = MobileNetV2(inputs=input_image,
input_shape=input_shape,
add_head=retain_head,
stride=stride,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
all_projections=all_projections,
nclasses=nclasses,
use_batch_norm=use_batch_norm,
use_bias=use_bias,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks)
if not retain_head:
final_model = add_dense_head(nclasses,
final_model,
data_format,
kernel_regularizer,
bias_regularizer)
return final_model
def get_squeezenet(input_shape=None,
data_format='channels_first',
nclasses=1000,
dropout=1e-3,
kernel_regularizer=None,
bias_regularizer=None,
retain_head=False,
freeze_blocks=None):
"""Wrapper to get SqueezeNet model from IVA templates."""
input_image = Input(shape=input_shape)
final_model = SqueezeNet(inputs=input_image,
input_shape=input_shape,
                             dropout=dropout,
add_head=retain_head,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
nclasses=nclasses,
freeze_blocks=freeze_blocks)
if not retain_head:
final_model = add_dense_head(nclasses,
final_model,
data_format,
kernel_regularizer,
bias_regularizer)
return final_model
def get_efficientnet_b0(
input_shape=None,
data_format='channels_first',
nclasses=1000,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
retain_head=False,
freeze_bn=False,
freeze_blocks=None,
stride16=False,
activation_type=None
):
"""Get an EfficientNet B0 model."""
input_image = Input(shape=input_shape)
final_model = EfficientNetB0(
input_tensor=input_image,
input_shape=input_shape,
add_head=retain_head,
data_format=data_format,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
classes=nclasses,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
stride16=stride16,
activation_type=activation_type
)
if not retain_head:
final_model = add_dense_head(nclasses,
final_model,
data_format,
kernel_regularizer,
bias_regularizer)
return final_model
def get_efficientnet_b1(
input_shape=None,
data_format='channels_first',
nclasses=1000,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
retain_head=False,
freeze_bn=False,
freeze_blocks=None,
stride16=False,
activation_type=None
):
"""Get an EfficientNet B1 model."""
input_image = Input(shape=input_shape)
final_model = EfficientNetB1(
input_tensor=input_image,
input_shape=input_shape,
add_head=retain_head,
data_format=data_format,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
classes=nclasses,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
stride16=stride16,
activation_type=activation_type
)
if not retain_head:
final_model = add_dense_head(nclasses,
final_model,
data_format,
kernel_regularizer,
bias_regularizer)
return final_model
def get_efficientnet_b2(
input_shape=None,
data_format='channels_first',
nclasses=1000,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
retain_head=False,
freeze_bn=False,
freeze_blocks=None,
stride16=False,
activation_type=None
):
"""Get an EfficientNet B2 model."""
input_image = Input(shape=input_shape)
final_model = EfficientNetB2(
input_tensor=input_image,
input_shape=input_shape,
add_head=retain_head,
data_format=data_format,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
classes=nclasses,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
stride16=stride16,
activation_type=activation_type
)
if not retain_head:
final_model = add_dense_head(nclasses,
final_model,
data_format,
kernel_regularizer,
bias_regularizer)
return final_model
def get_efficientnet_b3(
input_shape=None,
data_format='channels_first',
nclasses=1000,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
retain_head=False,
freeze_bn=False,
freeze_blocks=None,
stride16=False,
activation_type=None
):
"""Get an EfficientNet B3 model."""
input_image = Input(shape=input_shape)
final_model = EfficientNetB3(
input_tensor=input_image,
input_shape=input_shape,
add_head=retain_head,
data_format=data_format,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
classes=nclasses,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
stride16=stride16,
activation_type=activation_type
)
if not retain_head:
final_model = add_dense_head(nclasses,
final_model,
data_format,
kernel_regularizer,
bias_regularizer)
return final_model
def get_efficientnet_b4(
input_shape=None,
data_format='channels_first',
nclasses=1000,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
retain_head=False,
freeze_bn=False,
freeze_blocks=None,
stride16=False,
activation_type=None
):
"""Get an EfficientNet B4 model."""
input_image = Input(shape=input_shape)
final_model = EfficientNetB4(
input_tensor=input_image,
input_shape=input_shape,
add_head=retain_head,
data_format=data_format,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
classes=nclasses,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
stride16=stride16,
activation_type=activation_type
)
if not retain_head:
final_model = add_dense_head(nclasses,
final_model,
data_format,
kernel_regularizer,
bias_regularizer)
return final_model
def get_efficientnet_b5(
input_shape=None,
data_format='channels_first',
nclasses=1000,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
retain_head=False,
freeze_bn=False,
freeze_blocks=None,
stride16=False,
activation_type=None
):
"""Get an EfficientNet B5 model."""
input_image = Input(shape=input_shape)
final_model = EfficientNetB5(
input_tensor=input_image,
input_shape=input_shape,
add_head=retain_head,
data_format=data_format,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
classes=nclasses,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
stride16=stride16,
activation_type=activation_type
)
if not retain_head:
final_model = add_dense_head(nclasses,
final_model,
data_format,
kernel_regularizer,
bias_regularizer)
return final_model
def get_efficientnet_b6(
input_shape=None,
data_format='channels_first',
nclasses=1000,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
retain_head=False,
freeze_bn=False,
freeze_blocks=None,
stride16=False,
activation_type=None
):
"""Get an EfficientNet B6 model."""
input_image = Input(shape=input_shape)
final_model = EfficientNetB6(
input_tensor=input_image,
input_shape=input_shape,
add_head=retain_head,
data_format=data_format,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
classes=nclasses,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
stride16=stride16,
activation_type=activation_type
)
if not retain_head:
final_model = add_dense_head(nclasses,
final_model,
data_format,
kernel_regularizer,
bias_regularizer)
return final_model
def get_efficientnet_b7(
input_shape=None,
data_format='channels_first',
nclasses=1000,
use_bias=False,
kernel_regularizer=None,
bias_regularizer=None,
retain_head=False,
freeze_bn=False,
freeze_blocks=None,
stride16=False,
activation_type=None
):
"""Get an EfficientNet B7 model."""
input_image = Input(shape=input_shape)
final_model = EfficientNetB7(
input_tensor=input_image,
input_shape=input_shape,
add_head=retain_head,
data_format=data_format,
use_bias=use_bias,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
classes=nclasses,
freeze_bn=freeze_bn,
freeze_blocks=freeze_blocks,
stride16=stride16,
activation_type=activation_type
)
if not retain_head:
final_model = add_dense_head(nclasses,
final_model,
data_format,
kernel_regularizer,
bias_regularizer)
return final_model
# defining model dictionary
model_choose = {"resnet": get_resnet,
"darknet": get_darknet,
"cspdarknet": get_cspdarknet,
"cspdarknet_tiny": get_cspdarknet_tiny,
"vgg": get_vgg,
"googlenet": get_googlenet,
"alexnet": get_alexnet,
"mobilenet_v1": get_mobilenet,
"mobilenet_v2": get_mobilenet_v2,
"squeezenet": get_squeezenet,
"efficientnet_b0": get_efficientnet_b0,
"efficientnet_b1": get_efficientnet_b1,
"efficientnet_b2": get_efficientnet_b2,
"efficientnet_b3": get_efficientnet_b3,
"efficientnet_b4": get_efficientnet_b4,
"efficientnet_b5": get_efficientnet_b5,
"efficientnet_b6": get_efficientnet_b6,
"efficientnet_b7": get_efficientnet_b7}
def get_model(arch="resnet",
input_shape=(3, 224, 224),
data_format=None,
nclasses=1000,
kernel_regularizer=None,
bias_regularizer=None,
retain_head=False,
freeze_blocks=None,
**kwargs):
"""Wrapper to chose model defined in iva templates."""
kwa = dict()
if arch == 'googlenet':
kwa['use_batch_norm'] = kwargs['use_batch_norm']
kwa['use_bias'] = kwargs['use_bias']
kwa['freeze_bn'] = kwargs['freeze_bn']
elif arch == 'alexnet':
pass
elif arch == 'resnet':
kwa['nlayers'] = kwargs['nlayers']
kwa['use_batch_norm'] = kwargs['use_batch_norm']
kwa['use_pooling'] = kwargs['use_pooling']
kwa['freeze_bn'] = kwargs['freeze_bn']
kwa['use_bias'] = kwargs['use_bias']
kwa['all_projections'] = kwargs['all_projections']
elif arch in ['darknet', 'cspdarknet']:
kwa['nlayers'] = kwargs['nlayers']
kwa['freeze_bn'] = kwargs['freeze_bn']
kwa['use_bias'] = kwargs['use_bias']
kwa['use_batch_norm'] = kwargs['use_batch_norm']
if arch == "cspdarknet":
kwa["activation"] = kwargs["activation"].activation_type
elif arch in ["cspdarknet_tiny"]:
kwa['freeze_bn'] = kwargs['freeze_bn']
kwa['use_bias'] = kwargs['use_bias']
kwa['use_batch_norm'] = kwargs['use_batch_norm']
kwa["activation"] = kwargs["activation"].activation_type
elif arch == 'vgg':
kwa['nlayers'] = kwargs['nlayers']
kwa['use_batch_norm'] = kwargs['use_batch_norm']
kwa['use_pooling'] = kwargs['use_pooling']
kwa['freeze_bn'] = kwargs['freeze_bn']
kwa['use_bias'] = kwargs['use_bias']
kwa['dropout'] = kwargs['dropout']
elif arch == 'mobilenet_v1':
kwa['use_batch_norm'] = kwargs['use_batch_norm']
kwa['use_bias'] = kwargs['use_bias']
kwa['freeze_bn'] = kwargs['freeze_bn']
elif arch == 'mobilenet_v2':
kwa['use_batch_norm'] = kwargs['use_batch_norm']
kwa['use_bias'] = kwargs['use_bias']
kwa['freeze_bn'] = kwargs['freeze_bn']
kwa['all_projections'] = kwargs['all_projections']
elif arch == 'squeezenet':
kwa['dropout'] = kwargs['dropout']
elif arch == "efficientnet_b0":
kwa['use_bias'] = kwargs['use_bias']
kwa['freeze_bn'] = kwargs['freeze_bn']
kwa['activation_type'] = kwargs['activation'].activation_type
elif arch == "efficientnet_b1":
kwa['use_bias'] = kwargs['use_bias']
kwa['freeze_bn'] = kwargs['freeze_bn']
kwa['activation_type'] = kwargs['activation'].activation_type
elif arch == "efficientnet_b2":
kwa['use_bias'] = kwargs['use_bias']
kwa['freeze_bn'] = kwargs['freeze_bn']
kwa['activation_type'] = kwargs['activation'].activation_type
elif arch == "efficientnet_b3":
kwa['use_bias'] = kwargs['use_bias']
kwa['freeze_bn'] = kwargs['freeze_bn']
kwa['activation_type'] = kwargs['activation'].activation_type
elif arch == "efficientnet_b4":
kwa['use_bias'] = kwargs['use_bias']
kwa['freeze_bn'] = kwargs['freeze_bn']
kwa['activation_type'] = kwargs['activation'].activation_type
elif arch == "efficientnet_b5":
kwa['use_bias'] = kwargs['use_bias']
kwa['freeze_bn'] = kwargs['freeze_bn']
kwa['activation_type'] = kwargs['activation'].activation_type
elif arch == "efficientnet_b6":
kwa['use_bias'] = kwargs['use_bias']
kwa['freeze_bn'] = kwargs['freeze_bn']
kwa['activation_type'] = kwargs['activation'].activation_type
elif arch == "efficientnet_b7":
kwa['use_bias'] = kwargs['use_bias']
kwa['freeze_bn'] = kwargs['freeze_bn']
kwa['activation_type'] = kwargs['activation'].activation_type
else:
raise ValueError('Unsupported architecture: {}'.format(arch))
model = model_choose[arch](input_shape=input_shape,
nclasses=nclasses,
data_format=data_format,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
retain_head=retain_head,
freeze_blocks=freeze_blocks,
**kwa)
return model
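# Illustrative sketch: building a ResNet-18 classifier with the wrapper above
# (the settings below are arbitrary example values, not defaults from a spec).
#
#   model = get_model(arch="resnet",
#                     input_shape=(3, 224, 224),
#                     data_format="channels_first",
#                     nclasses=10,
#                     nlayers=18,
#                     use_batch_norm=True,
#                     use_pooling=False,
#                     freeze_bn=False,
#                     use_bias=False,
#                     all_projections=True)
#   model.summary()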
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/makenet/model/model_builder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build models for MakeNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/makenet/model/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""File containing constants for the spec handling."""
from nvidia_tao_tf1.cv.common.spec_validator import ValueChecker
TRAINVAL_OPTIONAL_CHECK_DICT = {
# model config optional parameters.
"n_layers": [ValueChecker(">", 0)],
"freeze_blocks": [ValueChecker(">=", 0)],
}
TRAINVAL_VALUE_CHECK_DICT = {
# model config parameters.
"arch": [ValueChecker("!=", ""),
ValueChecker("in", ["resnet",
"vgg",
"alexnet",
"googlenet",
"mobilenet_v1",
"mobilenet_v2",
"squeezenet",
"darknet",
"efficientnet_b0",
"efficientnet_b1",
"efficientnet_b2",
"efficientnet_b3",
"efficientnet_b4",
"efficientnet_b5",
"efficientnet_b6",
"efficientnet_b7",
"cspdarknet",
"cspdarknet_tiny"])],
"input_image_size": [ValueChecker("!=", "")],
"activation_type": [ValueChecker("!=", "")],
# training config
"n_workers": [ValueChecker(">", 0)],
"label_smoothing": [ValueChecker(">=", 0.0)],
"mixup_alpha": [ValueChecker(">=", 0.0)],
"train_dataset_path": [ValueChecker("!=", "")],
"val_dataset_path": [ValueChecker("!=", "")],
"n_epochs": [ValueChecker(">", 0)],
"batch_size_per_gpu": [ValueChecker(">", 0)],
"preprocess_mode": [ValueChecker("in", ["tf", "caffe", "torch"])],
# evaluation config required parameters.
"eval_dataset_path": [ValueChecker("!=", "")],
# Learning rate scheduler config.
"learning_rate": [ValueChecker(">", 0.0)],
# optimizer config.
"momentum": [ValueChecker(">=", 0)],
"epsilon": [ValueChecker(">=", 0)],
"rho": [ValueChecker(">=", 0)],
"beta_1": [ValueChecker(">=", 0)],
"beta_2": [ValueChecker(">=", 0)],
"decay": [ValueChecker(">=", 0)],
"dropout": [ValueChecker(">=", 0.0)],
"step_size": [ValueChecker(">=", 0)],
"gamma": [ValueChecker(">=", 0.0)],
"soft_start": [ValueChecker(">=", 0)],
"annealing_divider": [ValueChecker(">=", 0)],
"annealing_points": [ValueChecker(">=", 0)],
"min_lr_ratio": [ValueChecker(">=", 0.0)],
"lr": [ValueChecker(">", 0.0)],
# regularizer config
"type": [ValueChecker("!=", ""), ValueChecker("in", ["L1", "L2", "None"])],
"scope": [ValueChecker("!=", "")],
"weight_decay": [ValueChecker(">", 0.0)],
}
TRAINVAL_EXP_REQUIRED_MSG = ["model_config", "train_config"]
VALIDATION_EXP_REQUIRED_MSG = TRAINVAL_EXP_REQUIRED_MSG + ["eval_config"]
TRAINVAL_REQUIRED_MSG_DICT = {
"model_config": ["arch", "input_image_size"],
"eval_config": ["top_k", "eval_dataset_path",
"model_path", "batch_size"],
"train_config": [
"train_dataset_path", "val_dataset_path", "optimizer",
"batch_size_per_gpu", "n_epochs", "reg_config", "lr_config"
],
"reg_config": ["type", "scope", "weight_decay"]
}
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/makenet/spec_handling/constants.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Spec Handling for MakeNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/makenet/spec_handling/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Load MakeNet experiment spec .txt files and return an experiment_pb2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
from google.protobuf.text_format import Merge as merge_text_proto
from nvidia_tao_tf1.cv.common.spec_validator import SpecValidator
import nvidia_tao_tf1.cv.makenet.proto.experiment_pb2 as experiment_pb2
from nvidia_tao_tf1.cv.makenet.spec_handling.constants import (
TRAINVAL_EXP_REQUIRED_MSG,
TRAINVAL_OPTIONAL_CHECK_DICT,
TRAINVAL_REQUIRED_MSG_DICT,
TRAINVAL_VALUE_CHECK_DICT,
VALIDATION_EXP_REQUIRED_MSG
)
VALIDATION_SCHEMA = {
"train_val": {
"required_msg_dict": TRAINVAL_REQUIRED_MSG_DICT,
"value_checker_dict": TRAINVAL_VALUE_CHECK_DICT,
"required_msg": TRAINVAL_EXP_REQUIRED_MSG,
"optional_check_dict": TRAINVAL_OPTIONAL_CHECK_DICT,
"proto": experiment_pb2.Experiment(),
"default_spec": "experiment_specs/default_spec.txt"
},
"validation": {
"required_msg_dict": TRAINVAL_REQUIRED_MSG_DICT,
"value_checker_dict": TRAINVAL_VALUE_CHECK_DICT,
"required_msg": VALIDATION_EXP_REQUIRED_MSG,
"optional_check_dict": TRAINVAL_OPTIONAL_CHECK_DICT,
"proto": experiment_pb2.Experiment(),
"default_spec": "experiment_specs/default_spec.txt"
}
}
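# Note: the two schemas above share the same field checks; "validation" additionally requires
# an eval_config message (see VALIDATION_EXP_REQUIRED_MSG in spec_handling/constants.py).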
logger = logging.getLogger(__name__)
def validate_spec(spec, validation_schema="train_val"):
"""Validate the loaded experiment spec file."""
assert validation_schema in list(VALIDATION_SCHEMA.keys()), (
"Invalidation specification file schema: {}".format(validation_schema)
)
schema = VALIDATION_SCHEMA[validation_schema]
if schema["required_msg"] is None:
schema["required_msg"] = []
spec_validator = SpecValidator(required_msg_dict=schema["required_msg_dict"],
value_checker_dict=schema["value_checker_dict"])
spec_validator.validate(spec, schema["required_msg"])
def load_proto(spec_path, proto_buffer, default_spec_path,
merge_from_default=True):
"""Load spec from file and merge with given proto_buffer instance.
Args:
spec_path (str): location of a file containing the custom spec proto.
        proto_buffer(pb2): protocol buffer instance to be loaded.
default_spec_path(str): location of default spec to use if
merge_from_default is True.
merge_from_default (bool): disable default spec, if False, spec_path
must be set.
Returns:
proto_buffer(pb2): protocol buffer instance updated with spec.
"""
def _load_from_file(filename, pb2):
with open(filename, "r") as f:
merge_text_proto(f.read(), pb2)
# Setting this flag false prevents concatenating repeated-fields
if merge_from_default:
        assert default_spec_path, \
            "default spec path has to be defined if " \
            "merge_from_default is enabled"
# Load the default spec
_load_from_file(default_spec_path, proto_buffer)
else:
assert spec_path, "spec_path has to be defined," \
"if merge_from_default is disabled"
# Merge a custom proto on top of the default spec, if given
if spec_path:
_load_from_file(spec_path, proto_buffer)
return proto_buffer
def load_experiment_spec(spec_path=None, merge_from_default=True, validation_schema="train_val"):
"""Load experiment spec from a .txt file.
Args:
spec_path (str): location of a file containing the custom experiment
spec proto.
merge_from_default (bool):
disable default spec, if False, spec_path
must be set.
Returns:
experiment_spec: protocol buffer instance of type
experiment_pb2.Experiment.
"""
experiment_spec = VALIDATION_SCHEMA[validation_schema]["proto"]
file_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
default_spec_path = os.path.join(file_path,
VALIDATION_SCHEMA[validation_schema]["default_spec"])
experiment_spec = load_proto(spec_path, experiment_spec, default_spec_path,
merge_from_default)
validate_spec(experiment_spec, validation_schema=validation_schema)
return experiment_spec
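# Illustrative usage (the spec path below is a hypothetical example, not part of the module API):
#
#     from nvidia_tao_tf1.cv.makenet.spec_handling.spec_loader import load_experiment_spec
#
#     spec = load_experiment_spec("/workspace/specs/classification_spec.txt",
#                                 merge_from_default=False,
#                                 validation_schema="train_val")
#     print(spec.train_config.n_epochs)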
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/makenet/spec_handling/spec_loader.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Module containing sample spec files."""
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/makenet/experiment_specs/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to export a trained SSD model to an ETLT file for deployment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/makenet/export/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DetectNet_v2 calibrator class for TensorRT INT8 Calibration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import logging
import keras
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
import pycuda.autoinit # noqa pylint: disable=unused-import
import pycuda.driver as cuda
import tensorflow as tf
# Simple helper class for calibration.
from nvidia_tao_tf1.cv.common.export.base_calibrator import BaseCalibrator
# Building the classification dataloader.
from nvidia_tao_tf1.cv.makenet.utils import preprocess_crop # noqa pylint: disable=unused-import
from nvidia_tao_tf1.cv.makenet.utils.preprocess_input import preprocess_input
logger = logging.getLogger(__name__)
class ClassificationCalibrator(BaseCalibrator):
"""Detectnet_v2 calibrator class."""
def __init__(self, experiment_spec, cache_filename,
n_batches, batch_size,
*args, **kwargs):
"""Init routine.
This inherits from ``iva.common.export.base_calibrator.BaseCalibrator``
to implement the calibration interface that TensorRT needs to
calibrate the INT8 quantization factors. The data source here is assumed
        to be the data tensors that are yielded from the classification dataloader.
Args:
            experiment_spec (proto): MakeNet experiment spec proto used to build
                the calibration dataloader.
            cache_filename (str): name of calibration file to read/write to.
            n_batches (int): number of batches to calibrate over.
batch_size (int): batch size to use for calibration (this must be
smaller or equal to the batch size of the provided data).
"""
super(ClassificationCalibrator, self).__init__(
cache_filename,
n_batches, batch_size,
*args, **kwargs
)
self._data_source = None
# Instantiate the dataloader.
self.instantiate_data_source(experiment_spec)
# Configure tensorflow before running tensorrt.
self.set_session()
def set_session(self):
"""Simple function to set the tensorflow session."""
# Setting this to minimize the default allocation at import.
gpu_options = tf.compat.v1.GPUOptions(
per_process_gpu_memory_fraction=0.33,
allow_growth=True)
        # Configuring tensorflow to use CPU so that it doesn't interfere
# with tensorrt.
device_count = {'GPU': 0, 'CPU': 1}
session_config = tf.compat.v1.ConfigProto(
gpu_options=gpu_options,
device_count=device_count
)
tf_session = tf.compat.v1.Session(
config=session_config,
graph=tf.get_default_graph()
)
# Setting the keras session.
keras.backend.set_session(tf_session)
self.session = keras.backend.get_session()
def instantiate_data_source(self, experiment_spec):
"""Simple function to instantiate the data_source of the dataloader.
Args:
            experiment_spec (nvidia_tao_tf1.cv.makenet.proto.experiment_pb2): MakeNet
                experiment spec proto object.
Returns:
No explicit returns.
"""
        if not (hasattr(experiment_spec, 'train_config') and
                hasattr(experiment_spec, 'model_config')):
raise ValueError(
"Experiment spec doesnt' have train_config or "
"model_config. Please make sure the train_config "
"and model_config are both present in the experiment_spec "
"file provided.")
model_config = experiment_spec.model_config
image_shape = model_config.input_image_size.split(",")
n_channel = int(image_shape[0])
image_height = int(image_shape[1])
image_width = int(image_shape[2])
assert n_channel in [1, 3], "Invalid input image dimension."
assert image_height >= 16, "Image height should be greater than 15 pixels."
assert image_width >= 16, "Image width should be greater than 15 pixels."
img_mean = experiment_spec.train_config.image_mean
if n_channel == 3:
if img_mean:
                assert all(c in img_mean for c in ['r', 'g', 'b']), (
"'r', 'g', 'b' should all be present in image_mean "
"for images with 3 channels."
)
img_mean = [img_mean['b'], img_mean['g'], img_mean['r']]
else:
img_mean = [103.939, 116.779, 123.68]
else:
if img_mean:
assert 'l' in img_mean, (
"'l' should be present in image_mean for images "
"with 1 channel."
)
img_mean = [img_mean['l']]
else:
img_mean = [117.3786]
# Define path to dataset.
train_data = experiment_spec.train_config.train_dataset_path
# Setting dataloader color_mode.
color_mode = "rgb"
if n_channel == 1:
color_mode = "grayscale"
preprocessing_func = partial(
preprocess_input,
data_format="channels_first",
mode=experiment_spec.train_config.preprocess_mode,
color_mode=color_mode,
img_mean=img_mean
)
# Initialize the data generator.
logger.info("Setting up input generator.")
train_datagen = ImageDataGenerator(
preprocessing_function=preprocessing_func,
horizontal_flip=False,
featurewise_center=False
)
logger.debug("Setting up iterator.")
train_iterator = train_datagen.flow_from_directory(
train_data,
target_size=(
image_height,
image_width
),
batch_size=self.batch_size,
class_mode='categorical',
color_mode=color_mode
)
logger.info("Number of samples from the dataloader: {}".format(train_iterator.n))
        num_available_batches = int(train_iterator.n / self.batch_size)
assert self._n_batches <= num_available_batches, (
f"n_batches <= num_available_batches, n_batches={self._n_batches}, "
f"num_available_batches={num_available_batches}"
)
self._data_source = train_iterator
def get_data_from_source(self):
"""Simple function to get data from the defined data_source."""
batch, _ = next(self._data_source)
if batch is None:
raise ValueError(
"Batch wasn't yielded from the data source. You may have run "
"out of batches. Please set the num batches accordingly")
return batch
def get_batch(self, names):
"""Return one batch.
Args:
names (list): list of memory bindings names.
"""
if self._batch_count < self._n_batches:
batch = self.get_data_from_source()
if batch is not None:
if self._data_mem is None:
# 4 bytes per float32.
self._data_mem = cuda.mem_alloc(batch.size * 4)
self._batch_count += 1
# Transfer input data to device.
cuda.memcpy_htod(self._data_mem, np.ascontiguousarray(
batch, dtype=np.float32))
return [int(self._data_mem)]
if self._batch_count >= self._n_batches:
self.session.close()
tf.reset_default_graph()
if self._data_mem is not None:
self._data_mem.free()
return None
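# Informal note: TensorRT drives calibration by calling get_batch() repeatedly until it
# returns None, so the directory iterator built in instantiate_data_source() must be able
# to supply at least n_batches * batch_size images (enforced by the assert above).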
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/makenet/export/classification_calibrator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class to export trained .tlt models to etlt file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import logging
import os
import sys
import tempfile
import keras
from numba import cuda
import tensorflow as tf
from nvidia_tao_tf1.core.export._onnx import keras_to_onnx
# Import quantization layer processing.
from nvidia_tao_tf1.core.export._quantized import (
check_for_quantized_layers,
)
from nvidia_tao_tf1.core.export._uff import keras_to_pb, keras_to_uff
from nvidia_tao_tf1.core.export.app import get_model_input_dtype
from nvidia_tao_tf1.cv.common.export.keras_exporter import KerasExporter as Exporter
from nvidia_tao_tf1.cv.common.export.keras_exporter import SUPPORTED_ONNX_ROUTES
from nvidia_tao_tf1.cv.common.export.tensorfile_calibrator import TensorfileCalibrator
from nvidia_tao_tf1.cv.common.export.utils import pb_to_onnx
from nvidia_tao_tf1.cv.common.types.base_ds_config import BaseDSConfig
from nvidia_tao_tf1.cv.common.utils import CUSTOM_OBJS, get_decoded_filename, model_io
from nvidia_tao_tf1.cv.makenet.export.classification_calibrator import ClassificationCalibrator
from nvidia_tao_tf1.cv.makenet.spec_handling.spec_loader import load_experiment_spec
logger = logging.getLogger(__name__)
class ClassificationExporter(Exporter):
"""Define an exporter for classification models."""
def __init__(self, model_path=None,
key=None,
data_type="fp32",
strict_type=False,
backend="uff",
classmap_file=None,
experiment_spec_path="",
onnx_route="keras2onnx",
**kwargs):
"""Initialize the classification exporter.
Args:
model_path (str): Path to the model file.
key (str): Key to load the model.
            data_type (str): TensorRT backend data type.
strict_type(bool): Apply TensorRT strict_type_constraints or not for INT8 mode.
backend (str): TensorRT parser to be used.
classmap_file (str): Path to classmap.json file.
experiment_spec_path (str): Path to MakeNet experiment spec file.
onnx_route (str): Package to be used to convert the keras model to
ONNX.
Returns:
None.
"""
super(ClassificationExporter, self).__init__(model_path=model_path,
key=key,
data_type=data_type,
strict_type=strict_type,
backend=backend,
**kwargs)
self.classmap_file = classmap_file
self.onnx_route = onnx_route
assert self.onnx_route in SUPPORTED_ONNX_ROUTES, (
f"Invaid onnx route {self.onnx_route} requested."
)
logger.info("Setting the onnx export rote to {}".format(
self.onnx_route
))
# Load experiment spec if available.
if os.path.exists(experiment_spec_path):
self.experiment_spec = load_experiment_spec(
experiment_spec_path,
merge_from_default=False,
validation_schema="train_val"
)
self.eff_custom_objs = None
def set_keras_backend_dtype(self):
"""Set the keras backend data type."""
keras.backend.set_learning_phase(0)
tmp_keras_file_name = get_decoded_filename(self.model_path,
self.key,
self.eff_custom_objs)
model_input_dtype = get_model_input_dtype(tmp_keras_file_name)
keras.backend.set_floatx(model_input_dtype)
def load_model(self, backend="uff"):
"""Simple function to get the keras model."""
keras.backend.clear_session()
keras.backend.set_learning_phase(0)
model = model_io(self.model_path, enc_key=self.key, custom_objects=self.eff_custom_objs)
if check_for_quantized_layers(model):
model, self.tensor_scale_dict = self.extract_tensor_scale(model, backend)
return model
def set_input_output_node_names(self):
"""Set input output node names."""
self.output_node_names = ["predictions/Softmax"]
self.input_node_names = ["input_1"]
def load_classmap_file(self):
"""Load the classmap json."""
data = None
with open(self.classmap_file, "r") as cmap_file:
try:
data = json.load(cmap_file)
except json.decoder.JSONDecodeError as e:
print(f"Loading the {self.classmap_file} failed with error\n{e}")
sys.exit(-1)
except Exception as e:
                print(f"Classification exporter failed with error {e}")
                sys.exit(-1)
return data
def get_class_labels(self):
"""Get list of class labels to serialize to a labels.txt file."""
if not os.path.exists(self.classmap_file):
raise FileNotFoundError(
f"Classmap json file not found: {self.classmap_file}")
data = self.load_classmap_file()
if not data:
return []
labels = [""] * len(list(data.keys()))
if not all([class_index < len(labels)
and isinstance(class_index, int)
for class_index in data.values()]):
raise RuntimeError(
"Invalid data in the json file. The class index must "
"be < number of classes and an integer value.")
for class_name, class_index in data.items():
labels[class_index] = class_name
return labels
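    # Illustrative classmap.json layout assumed by load_classmap_file()/get_class_labels()
    # (example class names only):
    #     {"cat": 0, "dog": 1, "horse": 2}
    # Each value is the class index, so the labels list above would serialize to
    # ["cat", "dog", "horse"] in index order.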
def generate_ds_config(self, input_dims, num_classes=None):
"""Generate Deepstream config element for the exported model."""
channel_index = 0 if self.data_format == "channels_first" else -1
if input_dims[channel_index] == 1:
color_format = "l"
else:
color_format = "bgr" if self.preprocessing_arguments["flip_channel"] else "rgb"
kwargs = {
"data_format": self.data_format,
"backend": self.backend,
# Setting this to 1 for classification
"network_type": 1
}
if num_classes:
kwargs["num_classes"] = num_classes
if self.backend == "uff":
kwargs.update({
"input_names": self.input_node_names,
"output_names": self.output_node_names
})
ds_config = BaseDSConfig(
self.preprocessing_arguments["scale"],
self.preprocessing_arguments["means"],
input_dims,
color_format,
self.key,
**kwargs
)
return ds_config
def save_exported_file(self, model, output_file_name):
"""Save the exported model file.
This routine converts a keras model to onnx/uff model
based on the backend the exporter was initialized with.
Args:
model (keras.models.Model): Keras model to be saved.
output_file_name (str): Path to the output etlt file.
Returns:
tmp_file_name (str): Path to the temporary uff file.
"""
logger.debug("Saving etlt model file at: {}.".format(output_file_name))
input_tensor_names = ""
# @vpraveen: commented out the preprocessor kwarg from keras_to_uff.
# todo: @vpraveen and @zhimeng, if required modify modulus code to add
# this.
if self.backend == "uff":
input_tensor_names, _, _ = keras_to_uff(
model,
output_file_name,
output_node_names=self.output_node_names,
custom_objects=CUSTOM_OBJS)
elif self.backend == "onnx":
if self.onnx_route == "keras2onnx":
keras_to_onnx(
model,
output_file_name,
custom_objects=CUSTOM_OBJS,
target_opset=self.target_opset
)
else:
os_handle, tmp_pb_file = tempfile.mkstemp(
suffix=".pb"
)
os.close(os_handle)
input_tensor_names, out_tensor_names, _ = keras_to_pb(
model,
tmp_pb_file,
self.output_node_names,
custom_objects=CUSTOM_OBJS
)
if self.output_node_names is None:
self.output_node_names = out_tensor_names
logger.info("Model graph serialized to pb file.")
input_tensor_names, out_tensor_names = pb_to_onnx(
tmp_pb_file,
output_file_name,
input_tensor_names,
self.output_node_names,
self.target_opset,
verbose=False
)
input_tensor_names = ""
else:
raise NotImplementedError("Incompatible backend.")
return output_file_name
def clear_gpus(self):
"""Clear GPU memory before TRT engine building."""
tf.reset_default_graph()
def get_calibrator(self,
calibration_cache,
data_file_name,
n_batches,
batch_size,
input_dims,
calibration_images_dir=None,
image_mean=None):
"""Simple function to get an int8 calibrator.
Args:
calibration_cache (str): Path to store the int8 calibration cache file.
data_file_name (str): Path to the TensorFile. If the tensorfile doesn't exist
at this path, then one is created with either n_batches of random tensors,
images from the file in calibration_images_dir of dimensions
(batch_size,) + (input_dims)
n_batches (int): Number of batches to calibrate the model over.
batch_size (int): Number of input tensors per batch.
input_dims (tuple): Tuple of input tensor dimensions in CHW order.
calibration_images_dir (str): Path to a directory of images to generate the
data_file from.
image_mean (tuple): Pixel mean for channel-wise mean subtraction.
Returns:
calibrator (nvidia_tao_tf1.cv.common.export.base_calibrator.TensorfileCalibrator):
TRTEntropyCalibrator2 instance to calibrate the TensorRT engine.
"""
if self.experiment_spec is not None:
# Get calibrator based on the detectnet dataloader.
calibrator = ClassificationCalibrator(
self.experiment_spec,
calibration_cache,
n_batches,
batch_size)
else:
if not os.path.exists(data_file_name):
self.generate_tensor_file(data_file_name,
calibration_images_dir,
input_dims,
n_batches=n_batches,
batch_size=batch_size)
calibrator = TensorfileCalibrator(data_file_name,
calibration_cache,
n_batches,
batch_size)
return calibrator
| tao_tensorflow1_backend-main | nvidia_tao_tf1/cv/makenet/export/classification_exporter.py |
tao_tensorflow1_backend-main | third_party/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test configuration."""
from __future__ import absolute_import
import logging
import logging.config
import pytest
"""Root logger for tests."""
logger = logging.getLogger(__name__)
DEFAULT_SEED = 42
@pytest.fixture(scope="function", autouse=True)
def clear_session():
"""Clear the Keras session at the end of a test."""
import keras
import tensorflow as tf
import random
import numpy as np
import third_party.keras.tensorflow_backend
third_party.keras.tensorflow_backend.limit_tensorflow_GPU_mem(gpu_fraction=0.9)
random.seed(DEFAULT_SEED)
np.random.seed(DEFAULT_SEED)
tf.compat.v1.set_random_seed(DEFAULT_SEED)
# Yield and let test run to completion.
yield
# Clear session.
keras.backend.clear_session()
tf.compat.v1.reset_default_graph()
def pytest_addoption(parser):
"""
Verbosity options.
This adds two command-line flags:
--vv for INFO verbosity,
--vvv for DEBUG verbosity.
Example:
pytest -s -v --vv modulus
Default logging verbosity is WARNING.
"""
parser.addoption(
"--vv", action="store_true", default=False, help="log INFO messages."
)
parser.addoption(
"--vvv", action="store_true", default=False, help="log DEBUG messages."
)
def pytest_configure(config):
"""
Pytest configuration.
This is executed after parsing the command line.
"""
if config.getoption("--vvv"):
verbosity = "DEBUG"
elif config.getoption("--vv"):
verbosity = "INFO"
else:
verbosity = "WARNING"
logging.basicConfig(
format="%(asctime)s [%(levelname)s] %(name)s: %(message)s", level=verbosity
)
| tao_tensorflow1_backend-main | third_party/keras/conftest.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus Keras-extensions for mixed-precision training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import keras
from keras import backend as K
from keras import initializers
from keras.engine import InputSpec
from keras.legacy import interfaces
from keras.regularizers import Regularizer
from keras.utils import conv_utils
"""Logger for Keras tensorflow backend."""
logger = logging.getLogger(__name__)
@interfaces.legacy_add_weight_support
def _layer_add_weight(
self,
name,
shape,
dtype=None,
initializer=None,
regularizer=None,
trainable=True,
constraint=None,
):
"""Add a weight variable to the layer.
# Arguments
name: String, the name for the weight variable.
shape: The shape tuple of the weight.
dtype: The dtype of the weight.
initializer: An Initializer instance (callable).
regularizer: An optional Regularizer instance.
trainable: A boolean, whether the weight should
be trained via backprop or not (assuming
that the layer itself is also trainable).
constraint: An optional Constraint instance.
# Returns
The created weight variable.
"""
initializer = initializers.get(initializer)
# If dtype is given, use it directly.
if dtype:
variable_dtype = output_dtype = dtype
# In mixed precision training, by default, variables are created fp32 and cast to fp16.
elif not dtype and K.floatx() == "float16":
variable_dtype = "float32"
output_dtype = "float16"
# If dtype is not given, use the global default.
else:
variable_dtype = output_dtype = K.floatx()
weight = K.variable(
initializer(shape, dtype=variable_dtype),
dtype=variable_dtype,
name=name,
constraint=constraint,
)
if regularizer is not None:
self.add_loss(regularizer(weight))
if trainable:
self._trainable_weights.append(weight)
else:
self._non_trainable_weights.append(weight)
# For mixed-precision training, return a cast version of the variable.
if output_dtype != variable_dtype:
return K.cast(weight, output_dtype)
return weight
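# Informal illustration: with K.floatx() == "float16", add_weight(name="w", shape=(3,))
# creates a float32 master variable in the TensorFlow graph but hands the layer a float16
# cast of it; test_mixed_precision.py::test_layer_add_weight_fp16 checks exactly this.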
def _batch_normalization_build(self, input_shape):
dim = input_shape[self.axis]
if dim is None:
raise ValueError(
"Axis " + str(self.axis) + " of "
"input tensor should have a defined dimension "
"but the layer received an input with shape " + str(input_shape) + "."
)
self.input_spec = InputSpec(ndim=len(input_shape), axes={self.axis: dim})
shape = (dim,)
# For mixed-precision training, BN variables have to be created as float32.
if K.floatx() == "float16":
dtype_for_bn_variables = "float32"
else:
dtype_for_bn_variables = None
if self.scale:
self.gamma = self.add_weight(
shape=shape,
name="gamma",
initializer=self.gamma_initializer,
regularizer=self.gamma_regularizer,
constraint=self.gamma_constraint,
dtype=dtype_for_bn_variables,
)
else:
self.gamma = None
if self.center:
self.beta = self.add_weight(
shape=shape,
name="beta",
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
constraint=self.beta_constraint,
dtype=dtype_for_bn_variables,
)
else:
self.beta = None
self.moving_mean = self.add_weight(
shape=shape,
name="moving_mean",
initializer=self.moving_mean_initializer,
trainable=False,
dtype=dtype_for_bn_variables,
)
self.moving_variance = self.add_weight(
shape=shape,
name="moving_variance",
initializer=self.moving_variance_initializer,
trainable=False,
dtype=dtype_for_bn_variables,
)
self.built = True
def _batch_normalization_call(self, inputs, training=None):
input_shape = K.int_shape(inputs)
# Prepare broadcasting shape.
ndim = len(input_shape)
reduction_axes = list(range(len(input_shape)))
del reduction_axes[self.axis]
broadcast_shape = [1] * len(input_shape)
broadcast_shape[self.axis] = input_shape[self.axis]
# Determines whether broadcasting is needed.
needs_broadcasting = sorted(reduction_axes) != list(range(ndim))[:-1]
def normalize_inference():
if needs_broadcasting:
# In this case we must explicitly broadcast all parameters.
broadcast_moving_mean = K.reshape(self.moving_mean, broadcast_shape)
broadcast_moving_variance = K.reshape(self.moving_variance, broadcast_shape)
if self.center:
broadcast_beta = K.reshape(self.beta, broadcast_shape)
else:
broadcast_beta = None
if self.scale:
broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
else:
broadcast_gamma = None
return K.batch_normalization(
inputs,
K.cast(broadcast_moving_mean, inputs.dtype),
K.cast(broadcast_moving_variance, inputs.dtype),
K.cast(broadcast_beta, inputs.dtype),
K.cast(broadcast_gamma, inputs.dtype),
axis=self.axis,
epsilon=self.epsilon,
)
else:
return K.batch_normalization(
inputs,
K.cast(self.moving_mean, inputs.dtype),
K.cast(self.moving_variance, inputs.dtype),
K.cast(self.beta, inputs.dtype),
K.cast(self.gamma, inputs.dtype),
axis=self.axis,
epsilon=self.epsilon,
)
# If the learning phase is *static* and set to inference:
if training in {0, False}:
return normalize_inference()
# If the learning is either dynamic, or set to training:
normed_training, mean, variance = K.normalize_batch_in_training(
inputs, self.gamma, self.beta, reduction_axes, epsilon=self.epsilon
)
# if K.backend() != 'cntk':
# sample_size = K.prod([K.shape(inputs)[axis]
# for axis in reduction_axes])
# sample_size = K.cast(sample_size, dtype=K.dtype(variance))
# # sample variance - unbiased estimator of population variance
# variance *= sample_size / (sample_size - (1.0 + self.epsilon))
self.add_update(
[
K.moving_average_update(self.moving_mean, mean, self.momentum),
K.moving_average_update(self.moving_variance, variance, self.momentum),
],
inputs,
)
# Pick the normalized form corresponding to the training phase.
return K.in_train_phase(normed_training, normalize_inference, training=training)
class _RegularizerL1L2(Regularizer):
"""Regularizer for L1 and L2 regularization.
# Arguments
l1: Float; L1 regularization factor.
l2: Float; L2 regularization factor.
"""
def __init__(self, l1=0.0, l2=0.0):
self.l1 = l1
self.l2 = l2
def __call__(self, x):
regularization = 0.0
if self.l1:
regularization += K.sum(K.cast(self.l1, x.dtype) * K.abs(x))
if self.l2:
regularization += K.sum(K.cast(self.l2, x.dtype) * K.square(x))
return regularization
def get_config(self):
return {"l1": float(self.l1), "l2": float(self.l2)}
def _conv2dtranspose_call(self, inputs):
input_shape = K.shape(inputs)
batch_size = input_shape[0]
if self.data_format == "channels_first":
h_axis, w_axis = 2, 3
else:
h_axis, w_axis = 1, 2
height, width = input_shape[h_axis], input_shape[w_axis]
kernel_h, kernel_w = self.kernel_size
stride_h, stride_w = self.strides
if self.output_padding is None:
out_pad_h = out_pad_w = None
else:
out_pad_h, out_pad_w = self.output_padding
# Infer the dynamic output shape:
out_height = conv_utils.deconv_length(
height, stride_h, kernel_h, self.padding, out_pad_h, self.dilation_rate[0]
)
out_width = conv_utils.deconv_length(
width, stride_w, kernel_w, self.padding, out_pad_w, self.dilation_rate[1]
)
if self.data_format == "channels_first":
output_shape = (batch_size, self.filters, out_height, out_width)
else:
output_shape = (batch_size, out_height, out_width, self.filters)
outputs = K.conv2d_transpose(
inputs,
self.kernel,
output_shape,
self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate,
)
if self.use_bias:
outputs = K.bias_add(outputs, self.bias, data_format=self.data_format)
if self.activation is not None:
return self.activation(outputs)
return outputs
def patch():
"""Apply the patches to the module."""
_layer_add_weight.__name__ = "add_weight"
keras.engine.Layer.add_weight = _layer_add_weight
_batch_normalization_build.__name__ = "build"
keras.layers.BatchNormalization.build = _batch_normalization_build
_batch_normalization_call.__name__ = "call"
keras.layers.BatchNormalization.call = _batch_normalization_call
_RegularizerL1L2.__name__ = "L1L2"
keras.regularizers.L1L2 = _RegularizerL1L2
_conv2dtranspose_call.__name__ = "call"
keras.layers.Conv2DTranspose.call = _conv2dtranspose_call
| tao_tensorflow1_backend-main | third_party/keras/mixed_precision.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the Keras backend changes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import keras
from keras import backend as K
import numpy as np
from numpy.testing import assert_allclose
import pytest
import tensorflow as tf
import third_party.keras.tensorflow_backend
FILTERS = 5
conv2d_tests = [
# channels_last, valid and same, and with different striding.
((1, 4, 4, 3), (1, 4, 4, FILTERS), (3, 3), (1, 1), "same", "channels_last"),
((1, 4, 4, 3), (1, 2, 2, FILTERS), (3, 3), (2, 2), "same", "channels_last"),
((1, 4, 4, 3), (1, 2, 2, FILTERS), (3, 3), (1, 1), "valid", "channels_last"),
((1, 4, 4, 3), (1, 1, 1, FILTERS), (3, 3), (2, 2), "valid", "channels_last"),
# channels_first, valid and same, and with different striding.
((1, 3, 4, 4), (1, FILTERS, 4, 4), (3, 3), (1, 1), "same", "channels_first"),
((1, 3, 4, 4), (1, FILTERS, 2, 2), (3, 3), (2, 2), "same", "channels_first"),
((1, 3, 4, 4), (1, FILTERS, 2, 2), (3, 3), (1, 1), "valid", "channels_first"),
((1, 3, 4, 4), (1, FILTERS, 1, 1), (3, 3), (2, 2), "valid", "channels_first"),
]
@pytest.mark.parametrize(
"shape,expected_shape,kernel,strides,padding,data_format", conv2d_tests
)
def test_conv2d(shape, expected_shape, kernel, strides, padding, data_format):
"""Test the conv2d padding and data-format compatibility and shapes.
These checks are needed, as we have patched it to use explicit symmetric padding.
"""
inputs = tf.placeholder(shape=shape, dtype=tf.float32)
x = keras.layers.Conv2D(
filters=FILTERS,
kernel_size=kernel,
strides=strides,
padding=padding,
data_format=data_format,
)(inputs)
sess = keras.backend.get_session()
out = sess.run(x, feed_dict={inputs: np.zeros(shape)})
output_shape = out.shape
assert output_shape == expected_shape
conv2d_transpose_tests = [
# channels_last, valid and same, and with different striding.
((1, 4, 4, 3), (1, 4, 4, FILTERS), (3, 3), (1, 1), "same", "channels_last"),
((1, 4, 4, 3), (1, 8, 8, FILTERS), (3, 3), (2, 2), "same", "channels_last"),
((1, 4, 4, 3), (1, 6, 6, FILTERS), (3, 3), (1, 1), "valid", "channels_last"),
((1, 4, 4, 3), (1, 9, 9, FILTERS), (3, 3), (2, 2), "valid", "channels_last"),
# channels_first, valid and same, and with different striding.
((1, 3, 4, 4), (1, FILTERS, 4, 4), (3, 3), (1, 1), "same", "channels_first"),
((1, 3, 4, 4), (1, FILTERS, 8, 8), (3, 3), (2, 2), "same", "channels_first"),
((1, 3, 4, 4), (1, FILTERS, 6, 6), (3, 3), (1, 1), "valid", "channels_first"),
((1, 3, 4, 4), (1, FILTERS, 9, 9), (3, 3), (2, 2), "valid", "channels_first"),
]
@pytest.mark.parametrize(
"shape,expected_shape,kernel,strides,padding,data_format", conv2d_transpose_tests
)
def test_conv2d_transpose(shape, expected_shape, kernel, strides, padding, data_format):
inputs = tf.placeholder(shape=shape, dtype=tf.float32)
x = keras.layers.Conv2DTranspose(
filters=FILTERS,
kernel_size=kernel,
strides=strides,
padding=padding,
data_format=data_format,
)(inputs)
sess = keras.backend.get_session()
out = sess.run(x, feed_dict={inputs: np.zeros(shape)})
output_shape = out.shape
assert output_shape == expected_shape
bias_tests = [
((1, 3, 4, 4), (3), "channels_first"),
((1, 4, 4, 3), (3), "channels_last"),
]
@pytest.mark.parametrize("input_shape,bias_shape,data_format", bias_tests)
def test_bias(input_shape, bias_shape, data_format):
"""Test the bias_add improvements.
These improvements allow for native usage of NCHW format without transposing or reshaping.
The bias will never change the output size, so we only check if this function does not fail.
"""
inputs = tf.placeholder(shape=input_shape, dtype=tf.float32)
bias = tf.placeholder(shape=bias_shape, dtype=tf.float32)
x = keras.backend.bias_add(inputs, bias, data_format=data_format)
sess = keras.backend.get_session()
sess.run(x, feed_dict={inputs: np.zeros(input_shape), bias: np.zeros(bias_shape)})
batch_norm_tests = [
# All reduction
((1024, 3, 4, 4), -1),
# NCHW reduction
((1024, 3, 2, 2), 1),
# NHWC reduction
((1024, 2, 2, 3), 3),
]
@pytest.mark.parametrize("input_shape,axis", batch_norm_tests)
@pytest.mark.parametrize("is_training", [True])
def test_batchnorm_correctness(mocker, input_shape, axis, is_training):
"""Test batchnorm by training it on a normal distributed that is offset and scaled."""
mocker.spy(tf.nn, "fused_batch_norm")
inputs = keras.layers.Input(shape=input_shape[1:])
bn_layer = keras.layers.normalization.BatchNormalization(axis=axis, momentum=0.8)
bn = bn_layer(inputs)
model = keras.models.Model(inputs=inputs, outputs=bn)
model.compile(loss="mse", optimizer="sgd")
mean, var = 5, 10
x = np.random.normal(loc=mean, scale=var, size=input_shape)
model.fit(x, x, epochs=50, verbose=0)
beta = K.eval(bn_layer.beta)
gamma = K.eval(bn_layer.gamma)
nchannels = input_shape[axis]
beta_expected = np.array([mean] * nchannels, dtype=np.float32)
gamma_expected = np.array([var] * nchannels, dtype=np.float32)
# Test if batchnorm has learned the correct mean and variance
assert_allclose(beta, beta_expected, atol=5e-1)
assert_allclose(gamma, gamma_expected, atol=5e-1)
if axis == 1 and not third_party.keras.tensorflow_backend._has_nchw_support():
pytest.skip("Fused batchnorm with NCHW only supported on GPU.")
    # Test that the fused batch norm op was actually called when building the training graph.
assert tf.nn.fused_batch_norm.call_count == 1
def test_data_format():
assert keras.backend.image_data_format() == "channels_first"
pool2d_tests = [
# channels_last, valid and same, and with different striding.
((1, 4, 4, 3), (1, 4, 4, 3), (3, 3), (1, 1), "same", "channels_last", "max"),
((1, 4, 4, 3), (1, 2, 2, 3), (3, 3), (2, 2), "same", "channels_last", "average"),
((1, 4, 4, 3), (1, 2, 2, 3), (3, 3), (1, 1), "valid", "channels_last", "max"),
((1, 4, 4, 3), (1, 1, 1, 3), (3, 3), (2, 2), "valid", "channels_last", "average"),
# channels_first, valid and same, and with different striding.
((1, 3, 4, 4), (1, 3, 4, 4), (3, 3), (1, 1), "same", "channels_first", "max"),
((1, 3, 4, 4), (1, 3, 2, 2), (3, 3), (2, 2), "same", "channels_first", "average"),
((1, 3, 4, 4), (1, 3, 2, 2), (3, 3), (1, 1), "valid", "channels_first", "max"),
((1, 3, 4, 4), (1, 3, 1, 1), (3, 3), (2, 2), "valid", "channels_first", "average"),
]
@pytest.mark.parametrize(
"shape,expected_shape,pool_size,strides,padding,data_format," "pooling_type",
pool2d_tests,
)
def test_pool2d(
shape, expected_shape, pool_size, strides, padding, data_format, pooling_type
):
"""Test the pooling padding and data-format compatibility and shapes.
These checks are needed, as we have patched it to use explicit symmetric padding.
"""
if pooling_type == "max":
layer_class = keras.layers.MaxPooling2D
elif pooling_type == "average":
layer_class = keras.layers.AveragePooling2D
else:
raise ValueError("Unknown pooling type: %s" % pooling_type)
inputs = tf.placeholder(shape=shape, dtype=tf.float32)
x = layer_class(
pool_size=pool_size, strides=strides, padding=padding, data_format=data_format
)(inputs)
sess = keras.backend.get_session()
out = sess.run(x, feed_dict={inputs: np.zeros(shape)})
output_shape = out.shape
assert output_shape == expected_shape
def test_patch_dataset_map():
class _RandomSeedRecorder:
def __init__(self):
self.random_seed = None
def __call__(self, elt):
self.random_seed = tf.get_default_graph().seed
return elt
seed = 37
tf.set_random_seed(seed)
recorder = _RandomSeedRecorder()
ds = tf.data.Dataset.from_generator(lambda: (yield (0)), (tf.int64))
# Patch is already applied at this point, but if you comment it out and uncomment below code you
# can verify that this bug still exists and needs to be patched. If the test fails it may be
# that TF fixed this and we can remove the patch. Talk to @ehall
# ds.map(recorder)
# assert recorder.random_seed is None
#
# third_party.keras.tensorflow_backend._patch_dataset_map()
# Test that existing dataset got patched.
ds.map(recorder)
assert recorder.random_seed == seed
# Test that new dataset also gets patched.
recorder = _RandomSeedRecorder()
ds = tf.data.Dataset.from_generator(lambda: (yield (0)), (tf.int64))
ds.map(recorder)
assert recorder.random_seed == seed
def test_reproducible_mapping():
def test():
tf.reset_default_graph()
np.random.seed(42)
tf.set_random_seed(42)
images = np.random.rand(100, 64, 64, 3).astype(np.float32)
def mapfn(p):
return tf.image.random_hue(p, 0.04)
dataset = tf.data.Dataset.from_tensor_slices(images)
dataset = dataset.map(mapfn)
dataset = dataset.batch(32)
x = dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
return sess.run(x)
    assert np.allclose(test(), test()), "num_parallel_calls=1 nondeterministic"
| tao_tensorflow1_backend-main | third_party/keras/test_tensorflow_backend.py |
tao_tensorflow1_backend-main | third_party/keras/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus Keras-specific Extensions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import keras
from keras.backend import image_data_format
from keras.backend.tensorflow_backend import _preprocess_padding
import tensorflow as tf
from tensorflow.python.training import moving_averages
"""Logger for Keras tensorflow backend."""
logger = logging.getLogger(__name__)
DATA_FORMAT_MAP = {"channels_first": "NCHW", "channels_last": "NHWC"}
def _has_nchw_support():
"""Check whether the current scope supports NCHW ops.
Tensorflow does not support NCHW on CPU. Therefore we check if we are not explicitly put on
CPU, and have GPUs available. In this case there will be soft-placing on the GPU device.
Returns:
bool: if the current scope device placement would support nchw.
"""
# TODO:@subha This will be removed in the future when UNET completely moves to
    # tf.keras. Since UNet uses estimator.train, it internally converts the model
    # to tf.keras even though the model was built with pure keras. The _is_current_explicit_device
# has a function `_TfDeviceCaptureOp` that does not have attribute `_set_device_from_string`
# This is an error of keras backend: https://github.com/tensorflow/tensorflow/issues/30728
# Hence I catch the error and import from tensorflow.python.keras.backend
# that has the implementation for `_set_device_from_string`.
try:
from keras.backend.tensorflow_backend import _is_current_explicit_device
explicitly_on_cpu = _is_current_explicit_device("CPU")
except AttributeError:
# If tf.keras is used
from tensorflow.python.keras.backend import _is_current_explicit_device
explicitly_on_cpu = _is_current_explicit_device("CPU")
gpus_available = True # We always assume there is a GPU available.
return not explicitly_on_cpu and gpus_available
def conv2d(
x, kernel, strides=(1, 1), padding="valid", data_format=None, dilation_rate=(1, 1)
):
"""2D convolution.
Arguments:
x: Tensor or variable.
kernel: kernel tensor.
strides: strides tuple.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: tuple of 2 integers.
Returns:
A tensor, result of 2D convolution.
Raises:
ValueError: if `data_format` is neither `channels_last` or `channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in DATA_FORMAT_MAP:
raise ValueError("Unknown data_format " + str(data_format))
tf_data_format = DATA_FORMAT_MAP[data_format]
    # Avoid Tensorflow's implicit asymmetric padding by explicit symmetric padding
# See https://stackoverflow.com/questions/42924324/tensorflows-asymmetric-padding-assumptions
if padding == "same":
filter_shape = kernel.get_shape()
width_padding = ((filter_shape[0].value - 1) * dilation_rate[0] + 1) // 2
height_padding = ((filter_shape[1].value - 1) * dilation_rate[1] + 1) // 2
if tf_data_format == "NCHW":
padding_pattern = [
[0, 0],
[0, 0],
[width_padding, width_padding],
[height_padding, height_padding],
]
else: # 'NHWC'
padding_pattern = [
[0, 0],
[width_padding, width_padding],
[height_padding, height_padding],
[0, 0],
]
x = tf.pad(x, padding_pattern, mode="CONSTANT")
padding = "valid"
nhwc_roundtrip = not _has_nchw_support() and tf_data_format == "NCHW"
if nhwc_roundtrip:
tf_data_format = "NHWC"
x = tf.transpose(x, (0, 2, 3, 1)) # NCHW -> NHWC
padding = _preprocess_padding(padding)
x = tf.nn.convolution(
input=x,
filter=kernel,
dilation_rate=dilation_rate,
strides=strides,
padding=padding,
data_format=tf_data_format,
)
if nhwc_roundtrip:
        x = tf.transpose(x, (0, 3, 1, 2))  # NHWC -> NCHW
return x
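# Worked example (illustrative): for a 3x3 kernel with dilation_rate=(1, 1), the padding
# computed above is ((3 - 1) * 1 + 1) // 2 = 1 pixel per side, so a 4x4 "same"-padded input
# with stride 1 stays 4x4, matching the conv2d_tests cases in test_tensorflow_backend.py.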
def pool2d(
x, pool_size, strides=(1, 1), padding="valid", data_format=None, pool_mode="max"
):
"""2D Pooling.
# Arguments
x: Tensor or variable.
pool_size: tuple of 2 integers.
strides: tuple of 2 integers.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
pool_mode: string, `"max"` or `"avg"`.
# Returns
A tensor, result of 2D pooling.
# Raises
ValueError: if `data_format` is neither `"channels_last"` or `"channels_first"`.
ValueError: if `pool_mode` is neither `"max"` or `"avg"`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in DATA_FORMAT_MAP:
raise ValueError("Unknown data_format " + str(data_format))
tf_data_format = DATA_FORMAT_MAP[data_format]
    # Avoid Tensorflow's implicit asymmetric padding by explicit symmetric padding
if padding == "same":
width_padding = ((pool_size[0] - 1)) // 2
height_padding = ((pool_size[1] - 1)) // 2
if tf_data_format == "NCHW":
padding_pattern = [
[0, 0],
[0, 0],
[width_padding, width_padding],
[height_padding, height_padding],
]
else: # 'NHWC'
padding_pattern = [
[0, 0],
[width_padding, width_padding],
[height_padding, height_padding],
[0, 0],
]
x = tf.pad(x, padding_pattern, mode="CONSTANT")
padding = "valid"
nhwc_roundtrip = not _has_nchw_support() and tf_data_format == "NCHW"
if nhwc_roundtrip:
tf_data_format = "NHWC"
x = tf.transpose(x, (0, 2, 3, 1)) # NCHW -> NHWC
if nhwc_roundtrip or tf_data_format == "NHWC":
strides = (1,) + strides + (1,)
pool_size = (1,) + pool_size + (1,)
else:
strides = (1, 1) + strides
pool_size = (1, 1) + pool_size
padding = _preprocess_padding(padding)
if pool_mode == "max":
x = tf.nn.max_pool(
x, pool_size, strides, padding=padding, data_format=tf_data_format
)
elif pool_mode == "avg":
x = tf.nn.avg_pool(
x, pool_size, strides, padding=padding, data_format=tf_data_format
)
else:
raise ValueError("Invalid pooling mode:", pool_mode)
if nhwc_roundtrip:
        x = tf.transpose(x, (0, 3, 1, 2))  # NHWC -> NCHW
return x
def moving_average_update(x, value, momentum):
"""Compute the moving average of a variable.
# Arguments
x: A `Variable`.
value: A tensor with the same shape as `x`.
momentum: The moving average momentum.
# Returns
An operation to update the variable.
"""
# See: https://github.com/keras-team/keras/commit/3ce40705a7235cabe81cfaa2ab9b9d56f225af52
return moving_averages.assign_moving_average(
x, value, momentum, zero_debias=False
) # A zero_debias==True creates unwanted tf variables.
def batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=1e-3):
"""Applies batch normalization on x given mean, var, beta and gamma.
I.e. returns:
`output = (x - mean) / sqrt(var + epsilon) * gamma + beta`
# Arguments
x: Input tensor or variable.
mean: Mean of batch.
var: Variance of batch.
beta: Tensor with which to center the input.
gamma: Tensor by which to scale the input.
axis: Integer, the axis that should be normalized.
(typically the features axis).
epsilon: Fuzz factor.
# Returns
A tensor.
TODO(xiangbok): Fixes a bug in keras v2.2.4, this function is adapted from #1df4052.
"""
# if ndim(x) == 4:
# # The CPU implementation of FusedBatchNorm only support NHWC
# if axis == 1 or axis == -3:
# tf_data_format = 'NCHW'
# elif axis == 3 or axis == -1:
# tf_data_format = 'NHWC'
# else:
# tf_data_format = None
# if (x.dtype != tf.float16 and # fused bn doesn't support fp16.
# (tf_data_format == 'NHWC' or (tf_data_format == 'NCHW' and _has_nchw_support()))):
# # The mean / var / beta / gamma may be processed by broadcast
# # so it may have extra axes with 1,
# # it is not needed and should be removed
# if ndim(mean) > 1:
# mean = tf.reshape(mean, [-1])
# if ndim(var) > 1:
# var = tf.reshape(var, [-1])
# if beta is None:
# beta = zeros_like(mean)
# elif ndim(beta) > 1:
# beta = tf.reshape(beta, [-1])
# if gamma is None:
# gamma = ones_like(mean)
# elif ndim(gamma) > 1:
# gamma = tf.reshape(gamma, [-1])
# y, _, _ = tf.nn.fused_batch_norm(
# x,
# gamma,
# beta,
# epsilon=epsilon,
# mean=mean,
# variance=var,
# data_format=tf_data_format,
# is_training=False
# )
# return y
# default
return tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
def softmax(x, axis=-1):
"""Softmax activation function.
Patched to allow use of the backend's `softmax` regardless of the
cardinality of the input dimensions.
# Arguments
x: Input tensor.
axis: Integer, axis along which the softmax normalization is applied.
# Returns
Tensor, output of softmax transformation.
# Raises
ValueError: In case `dim(x) == 1`.
"""
ndim = keras.backend.ndim(x)
if ndim == 4 and axis == 1:
# Nvbug 2356150: in the "channels_first" case tf.nn.softmax adds a channel swap
# roundtrip to perform the softmax in "channels_last" order. The channel swap is done
# through tensor shape manipulations, which TensorRT cannot handle (TensorRT needs
# the permutation vector to be a constant). Below is a workaround for the NCHW softmax.
# Transpose to "channels_last" order.
x = tf.transpose(x, perm=[0, 2, 3, 1])
# Do the softmax in "channels_last" order (do not necessitate transpose).
x = tf.nn.softmax(x, axis=-1)
        # Transpose back to "channels_first".
x = tf.transpose(x, perm=[0, 3, 1, 2])
return x
if ndim >= 2:
return tf.nn.softmax(x, axis=axis)
raise ValueError(
"Cannot apply softmax to a tensor that is 1D. " "Received input: %s" % x
)
def flatten_call(self, inputs):
"""call method of Flatten layer."""
# Overrides the suboptimal change added to keras that makes Flatten layers' channels_first
# to be export incompatible (reverts https://github.com/keras-team/keras/pull/9696).
return keras.backend.batch_flatten(inputs)
def _patch_backend_function(f):
"""Patch keras backend functionality.
The patch is applied to both the general keras backend and the framework specific backend.
Args:
f (func): a function with the same name as exists in the keras backend.
"""
name = f.__name__
logger.debug("Patching %s" % name)
keras.backend.__setattr__(name, f)
keras.backend.tensorflow_backend.__setattr__(name, f)
def _patch_dataset_map():
"""
Patches `tf.data.Dataset.map` function which properly sets the random seeds.
Patches with a wrapped version of the original method which properly sets the random seeds in
in the context of the subgraph created by the map operation.
This patch addresses the problem that the random seed is not set in the graph used by the
augmentations and other functions which are applied via the map operation. This issue was seen
    in TF v1.13.1.
"""
# See https://github.com/tensorflow/tensorflow/issues/29101
old_map = tf.data.Dataset.map
def new_map(self, map_func, num_parallel_calls=None):
seed = tf.get_default_graph().seed
def _map_func_set_random_wrapper(*args, **kwargs):
tf.set_random_seed(seed)
return map_func(*args, **kwargs)
return old_map(
self, _map_func_set_random_wrapper, num_parallel_calls=num_parallel_calls
)
tf.data.Dataset.map = new_map
def patch():
"""Apply the patches to the module."""
_patch_backend_function(conv2d)
_patch_backend_function(pool2d)
_patch_backend_function(moving_average_update)
_patch_backend_function(batch_normalization)
_patch_backend_function(_has_nchw_support)
_patch_dataset_map()
keras.layers.activations.__setattr__("softmax", softmax)
keras.layers.Flatten.call = flatten_call
keras.backend.set_image_data_format("channels_first")
def limit_tensorflow_GPU_mem(gpu_fraction=0.33):
"""Limit TensorFlow memory usage.
Configure TensorFlow to grow its memory pool up to specified limit instead of
greedily allocating all available GPU memory.
Args:
gpu_fraction (float): maximum fraction of GPU memory in TensorFlow pool
"""
def get_session(gpu_fraction):
gpu_options = tf.GPUOptions(
per_process_gpu_memory_fraction=gpu_fraction, allow_growth=True
)
return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
keras.backend.set_session(get_session(gpu_fraction=gpu_fraction))
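# Example usage (mirrors conftest.py): limit the TensorFlow memory pool before tests run:
#
#     import third_party.keras.tensorflow_backend
#     third_party.keras.tensorflow_backend.limit_tensorflow_GPU_mem(gpu_fraction=0.9)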
| tao_tensorflow1_backend-main | third_party/keras/tensorflow_backend.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the Keras backend changes related to mixed-precision implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras import backend as K
from keras.engine import Layer
from keras.layers import BatchNormalization
from keras.layers import Conv2D
from keras.layers import Input
from keras.models import Model
from keras.regularizers import l1
from keras.regularizers import l2
import numpy as np
from numpy.testing import assert_allclose
import pytest
import tensorflow as tf
WEIGHT_SHAPE = (3, 3, 3)
DATA_SHAPE = (24, 3, 16, 32)
BATCH_NORM_VARIABLE_NAMES = ("beta", "gamma", "moving_mean", "moving_variance")
def _get_tf_weight_by_name(name):
"""Get Tensorflow variable based on a partial variable name."""
candidates = [
var
for var in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
if name in var.name
]
assert len(candidates) == 1, "Use unique variable names."
return candidates[0]
@pytest.mark.usefixtures("clear_session")
def test_layer_add_weight_fp32():
"""Test that keras.engine.Layer.add_weight is correctly patched for fp32 mode."""
K.set_floatx("float32")
# Create a layer and a weight.
fp32_layer = Layer()
keras_weight = fp32_layer.add_weight(
name="fp32_mode_weight", shape=WEIGHT_SHAPE, initializer="ones"
)
# For fp32, returned weight has to match the backend variable.
backend_variable = _get_tf_weight_by_name("fp32_mode_weight")
assert backend_variable == keras_weight, "add_weight returned an unknown tensor."
# Get the values and verify data type and shape.
sess = K.get_session()
np_keras_weight = sess.run(keras_weight)
assert np_keras_weight.dtype == np.float32
assert_allclose(np_keras_weight, np.ones(WEIGHT_SHAPE, dtype=np.float32))
@pytest.mark.usefixtures("clear_session")
def test_layer_add_weight_fp16():
"""Test that keras.engine.Layer.add_weight is correctly patched for fp16 mode."""
K.set_floatx("float16")
# Create a layer and a weight.
fp16_layer = Layer()
keras_weight = fp16_layer.add_weight(
name="fp16_mode_weight", shape=WEIGHT_SHAPE, initializer="ones"
)
# For fp16, returned weight shall not match the backend variable.
backend_variable = _get_tf_weight_by_name("fp16_mode_weight")
assert backend_variable != keras_weight, "add_weight returned a raw variable."
# Get the values and verify data type and shape.
sess = K.get_session()
np_keras_weight = sess.run(keras_weight)
np_backend_variable = sess.run(backend_variable)
assert np_keras_weight.dtype == np.float16
assert_allclose(np_keras_weight, np.ones(WEIGHT_SHAPE, dtype=np.float16))
# In mixed-precision training, backend variables are created in float32.
assert np_backend_variable.dtype == np.float32
assert_allclose(np_backend_variable, np.ones(WEIGHT_SHAPE, dtype=np.float32))
@pytest.mark.usefixtures("clear_session")
@pytest.mark.parametrize("data_type", ["float16", "float32"])
def test_batch_normalization(data_type):
"""Test that patched build and call are in use in BatchNormalization layer."""
# Set backend precision.
K.set_floatx(data_type)
# Create dummy data.
np_ones = np.ones(DATA_SHAPE, dtype=data_type)
# Build the training graph.
K.set_learning_phase(1)
# Placeholder for input data.
train_input = tf.placeholder(dtype=data_type, shape=DATA_SHAPE, name="train_data")
input_layer = Input(tensor=train_input, name="train_input_layer")
# Add one batch normalization layer.
bn_out = BatchNormalization(axis=1, name="batchnorm_layer")(input_layer)
# Get the model and its output.
model = Model(inputs=input_layer, outputs=bn_out, name="dummy_model")
train_output = model(train_input)
# Build inference graph.
K.set_learning_phase(0)
infer_input = tf.placeholder(dtype=data_type, shape=DATA_SHAPE, name="infer_data")
infer_output = model(infer_input)
# Verify that all backend variables were created as float32_ref.
for variable_name in BATCH_NORM_VARIABLE_NAMES:
# Get backend variable by name
var = _get_tf_weight_by_name(variable_name)
# tf.float32_ref object does not exist, serialize and text compare.
assert "float32_ref" in str(
var.dtype
), "BatchNormalization created wrong variable dtype."
# Verify that training and inference outputs follow Keras floatx setting.
assert bn_out.dtype == data_type, "Wrong training output data type."
assert infer_output.dtype == data_type, "Wrong inference output data type."
# Infer with BN initial moving mean and variance (shall not modify the input).
sess = K.get_session()
net_out = sess.run(infer_output, feed_dict={infer_input: np_ones})
assert_allclose(net_out, np_ones, atol=1e-3)
# Build cost and optimizer.
K.set_learning_phase(1)
cost = tf.reduce_sum(train_output)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.0)
train_op = optimizer.minimize(loss=cost)
# Run one step of training.
_, net_out = sess.run([train_op, train_output], feed_dict={train_input: np_ones})
# Verify that BN removed mean -> all outputs should be zeros.
assert_allclose(net_out, np.zeros_like(net_out, dtype=data_type), atol=1e-4)
@pytest.mark.usefixtures("clear_session")
@pytest.mark.parametrize("data_type", ["float16", "float32"])
def test_regularizers(data_type):
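    """Test that regularization losses are computed in float32 in both fp16 and fp32 modes."""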
# Set backend precision.
K.set_floatx(data_type)
# Use very small weights (will round to zero in non-patched fp16 implementation).
l1_regularizer = l1(1e-9)
l2_regularizer = l2(1e-9)
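    # Note (added for clarity): 1e-9 is below float16's smallest subnormal
    # (roughly 6e-8), so an unpatched fp16 regularizer would flush these
    # penalties to zero; the dtype assertions below check that the losses are
    # instead computed in float32.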
# Create a convolutional model.
K.set_learning_phase(1)
train_input = tf.placeholder(dtype=data_type, shape=DATA_SHAPE, name="train_data")
input_layer = Input(tensor=train_input, name="train_input_layer")
conv1_out = Conv2D(
1,
(3, 3),
data_format="channels_first",
kernel_regularizer=l1_regularizer,
name="convolutional_layer_1",
)(input_layer)
conv2_out = Conv2D(
1,
(3, 3),
data_format="channels_first",
kernel_regularizer=l2_regularizer,
name="convolutional_layer_2",
)(conv1_out)
# Get the model and regularization losses.
model = Model(inputs=input_layer, outputs=conv2_out, name="dummy_model")
reg_losses = model.losses
# Get the regularization losses with dummy input.
np_ones = np.ones(DATA_SHAPE, dtype=data_type)
sess = K.get_session()
loss_values = sess.run(reg_losses, feed_dict={train_input: np_ones})
# Verify regularization loss data types.
assert [loss.dtype for loss in loss_values] == [
np.float32,
np.float32,
], "Regularization loss dtype shall match backend variable dtype (always float32)."
# Verify that regularization loss is not zero.
assert np.all(np.array(loss_values) > 1e-10), "Regularization loss is zero."
| tao_tensorflow1_backend-main | third_party/keras/test_mixed_precision.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pybind Test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
import pytest
import sys
def test_import_and_use():
import third_party.pybind11.py_bind_test_lib as py_bind_test_lib
assert 3 == py_bind_test_lib.add(5, -2)
if __name__ == "__main__":
sys.exit(pytest.main([__file__]))
| tao_tensorflow1_backend-main | third_party/pybind11/py_bind_test.py |
"""Smoke tests for the built horovod wheel."""
import horovod
def test_built_horovod_version():
"""Test horovod available in ai-infra at the correct version."""
assert horovod.__version__ == "0.22.1"
def test_lazy_horovod_init():
"""Test horovod with tensorflow lazy initialization."""
import horovod.tensorflow as hvd
hvd.init()
| tao_tensorflow1_backend-main | third_party/horovod/test_horovod.py |
import subprocess
import os
subprocess.call(
"{}/third_party/horovod/build_horovod.sh".format(os.getcwd()), shell=True
)
| tao_tensorflow1_backend-main | third_party/horovod/build_horovod.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Some helpers for using jsonnet within ai-infra."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import _jsonnet
from google.protobuf import json_format
from google.protobuf import text_format
_PROTO_EXTENSIONS = [".txt", ".prototxt"]
def _import_callback(proto_constructor=None):
def callback(owning_directory, path):
"""Reads a file from disk.
Args:
            owning_directory (str): Directory of the importing jsonnet file, used to resolve relative paths.
path (str): The path that is being imported.
Returns:
(str, str): The full path and contents of the file.
"""
# This enables both relative and absolute pathing. First, check to see if the absolute
# path exists, at least relative to the working directory. If it doesn't, then prepend
        # the directory of the importing jsonnet file.
if not os.path.exists(path):
path = os.path.join(owning_directory, path)
with open(path, "r") as infile:
data = infile.read()
if proto_constructor and any(
path.endswith(ext) for ext in _PROTO_EXTENSIONS
):
proto = proto_constructor()
text_format.Merge(data, proto)
data = json_format.MessageToJson(proto)
return path, data
return callback
def evaluate_file(path, ext_vars=None, proto_constructor=None):
"""Evaluates a jsonnet file.
If a proto_constructor is given, then any `import` statements in the jsonnet file can import
text protos; and the returned value will be an object of that proto type.
Args:
path (str): Path to the jsonnet file, relative to ai-infra root.
ext_vars (dict): Variables to pass to the jsonnet script.
proto_constructor (callable): Callable used for converting to protos.
Returns:
(str|protobuf): If a proto constructor is provided, the jsonnet file is evaluated as a text
proto; otherwise it is evaluated as a JSON string.
"""
data = _jsonnet.evaluate_file(
path, ext_vars=ext_vars, import_callback=_import_callback(proto_constructor)
)
if proto_constructor:
proto = proto_constructor()
json_format.Parse(data, proto)
return proto
return data
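# Usage sketch (illustrative only; the path, variable name and proto type below
# are hypothetical placeholders -- see jsonnet_test.py for concrete examples):
#
#   spec_json = evaluate_file("path/to/spec.jsonnet",
#                             ext_vars={"run_name": "debug"})
#   spec_proto = evaluate_file("path/to/spec.jsonnet",
#                              proto_constructor=my_spec_pb2.Spec)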
| tao_tensorflow1_backend-main | third_party/jsonnet/jsonnet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Some helpers for using jsonnet within ai-infra."""
from third_party.jsonnet.jsonnet import evaluate_file # noqa
__all__ = "evaluate_file"
| tao_tensorflow1_backend-main | third_party/jsonnet/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for jsonnet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
from third_party import jsonnet
from third_party.jsonnet import fake_pb2
def test_jsonnet():
result = jsonnet.evaluate_file("third_party/jsonnet/testdata/template.jsonnet")
assert json.loads(result)["foo"] == "bar"
def test_relative_import():
result = jsonnet.evaluate_file(
"third_party/jsonnet/testdata/relimport_template.jsonnet"
)
assert json.loads(result)["foo"] == "bar"
def test_jsonnet_with_proto():
result = jsonnet.evaluate_file(
"third_party/jsonnet/testdata/template.jsonnet",
proto_constructor=fake_pb2.FakeMessage,
)
assert result.foo == "bar"
def test_external_variable():
result = jsonnet.evaluate_file(
"third_party/jsonnet/testdata/ext_var.jsonnet",
ext_vars={"external_value": "hello world"},
)
assert json.loads(result)["the_val"] == "hello world"
| tao_tensorflow1_backend-main | third_party/jsonnet/jsonnet_test.py |
#!/usr/bin/env python
# coding: utf-8
import json
import numpy as np
from PIL import Image
from tqdm import tqdm
# [ymin, ymax, xmin, xmax] to [x, y, w, h]
def box_transform(box):
x = box[2]
y = box[0]
w = box[3] - box[2] + 1
h = box[1] - box[0] + 1
return [x, y, w, h]
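# Worked example (added for clarity): a VRD box [ymin, ymax, xmin, xmax] of
# [5, 15, 10, 30] maps to the COCO-style [x, y, w, h] box [10, 5, 21, 11];
# width and height are inclusive pixel counts, hence the "+ 1".
assert box_transform([5, 15, 10, 30]) == [10, 5, 21, 11]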
def convert_anno(split):
with open('data/vrd/new_annotations_' + split + '.json', 'r') as f:
vrd_anns = json.load(f)
print(len(vrd_anns))
img_dir = 'data/vrd/' + split + '_images/'
new_imgs = []
new_anns = []
ann_id = 1
for f, anns in tqdm(vrd_anns.items()):
im_w, im_h = Image.open(img_dir + f).size
image_id = int(f.split('.')[0])
new_imgs.append(dict(file_name=f, height=im_h, width=im_w, id=image_id))
# used for duplicate checking
bbox_set = set()
for ann in anns:
# "area" in COCO is the area of segmentation mask, while here it's the area of bbox
            # also need to fake an 'iscrowd' field, which is always 0
s_box = ann['subject']['bbox']
bbox = box_transform(s_box)
if not tuple(bbox) in bbox_set:
bbox_set.add(tuple(bbox))
area = bbox[2] * bbox[3]
cat = ann['subject']['category']
new_anns.append(dict(area=area, bbox=bbox, category_id=cat, id=ann_id, image_id=image_id, iscrowd=0))
ann_id += 1
o_box = ann['object']['bbox']
bbox = box_transform(o_box)
if not tuple(bbox) in bbox_set:
bbox_set.add(tuple(bbox))
area = bbox[2] * bbox[3]
cat = ann['object']['category']
new_anns.append(dict(area=area, bbox=bbox, category_id=cat, id=ann_id, image_id=image_id, iscrowd=0))
ann_id += 1
with open('data/vrd/objects.json', 'r') as f:
vrd_objs = json.load(f)
new_objs = []
for i, obj in enumerate(vrd_objs):
new_objs.append(dict(id=i, name=obj, supercategory=obj))
new_data = dict(images=new_imgs, annotations=new_anns, categories=new_objs)
with open('data/vrd/detections_' + split + '.json', 'w') as outfile:
json.dump(new_data, outfile)
if __name__ == '__main__':
convert_anno('train')
convert_anno('val')
| ContrastiveLosses4VRD-master | tools/convert_vrd_anno_to_coco_format.py |
# Adapted by Ji Zhang, 2019
#
# Based on Detectron.pytorch/tools/test_net.py Written by Roy Tseng
"""Perform inference on one or more datasets."""
import argparse
import cv2
import os
import pprint
import sys
import time
from six.moves import cPickle as pickle
import torch
import _init_paths # pylint: disable=unused-import
from core.config import cfg, merge_cfg_from_file, merge_cfg_from_list, assert_and_infer_cfg
from core.test_engine_rel import run_inference
import utils.logging
from datasets_rel import task_evaluation_sg as task_evaluation_sg
from datasets_rel import task_evaluation_vg_and_vrd as task_evaluation_vg_and_vrd
# OpenCL may be enabled by default in OpenCV3; disable it because it's not
# thread safe and causes unwanted GPU memory allocations.
cv2.ocl.setUseOpenCL(False)
def parse_args():
"""Parse in command line arguments"""
parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')
parser.add_argument(
'--dataset',
help='training dataset')
parser.add_argument(
'--cfg', dest='cfg_file', required=True,
help='optional config file')
parser.add_argument(
'--load_ckpt', help='path of checkpoint to load')
parser.add_argument(
'--load_detectron', help='path to the detectron weight pickle file')
parser.add_argument(
'--output_dir',
help='output directory to save the testing results. If not provided, '
'defaults to [args.load_ckpt|args.load_detectron]/../test.')
parser.add_argument(
'--set', dest='set_cfgs',
help='set config keys, will overwrite config in the cfg_file.'
' See lib/core/config.py for all options',
default=[], nargs='*')
parser.add_argument(
'--range',
help='start (inclusive) and end (exclusive) indices',
type=int, nargs=2)
parser.add_argument(
'--multi-gpu-testing', help='using multiple gpus for inference',
action='store_true')
parser.add_argument(
'--do_val', dest='do_val', help='do evaluation', action='store_true')
parser.add_argument(
'--do_vis', dest='do_vis', help='visualize the last layer of conv_body', action='store_true')
parser.add_argument(
'--do_special', dest='do_special', help='visualize the last layer of conv_body', action='store_true')
parser.add_argument(
'--use_gt_boxes', dest='use_gt_boxes', help='use gt boxes for sgcls/prdcls', action='store_true')
parser.add_argument(
        '--use_gt_labels', dest='use_gt_labels', help='use gt labels for prdcls', action='store_true')
return parser.parse_args()
if __name__ == '__main__':
if not torch.cuda.is_available():
sys.exit("Need a CUDA device to run the code.")
logger = utils.logging.setup_logging(__name__)
args = parse_args()
logger.info('Called with args:')
logger.info(args)
assert (torch.cuda.device_count() == 1) ^ bool(args.multi_gpu_testing)
if args.cfg_file is not None:
merge_cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
        merge_cfg_from_list(args.set_cfgs)
if args.dataset == "oi_rel":
cfg.TEST.DATASETS = ('oi_rel_val',)
cfg.MODEL.NUM_CLASSES = 58
cfg.MODEL.NUM_PRD_CLASSES = 9 # rel, exclude background
elif args.dataset == "oi_rel_mini":
cfg.TEST.DATASETS = ('oi_rel_val_mini',)
cfg.MODEL.NUM_CLASSES = 58
cfg.MODEL.NUM_PRD_CLASSES = 9 # rel, exclude background
elif args.dataset == "oi_all_rel_train":
cfg.TEST.DATASETS = ('oi_all_rel_train',)
cfg.MODEL.NUM_CLASSES = 58
cfg.MODEL.NUM_PRD_CLASSES = 9 # rel, exclude background
elif args.dataset == "oi_all_rel":
cfg.TEST.DATASETS = ('oi_all_rel_val',)
cfg.MODEL.NUM_CLASSES = 58
cfg.MODEL.NUM_PRD_CLASSES = 9 # rel, exclude background
elif args.dataset == "oi_kaggle":
cfg.TEST.DATASETS = ('oi_kaggle_rel_test',)
cfg.MODEL.NUM_CLASSES = 58
cfg.MODEL.NUM_PRD_CLASSES = 9 # rel, exclude background
elif args.dataset == "vg_mini":
cfg.TEST.DATASETS = ('vg_val_mini',)
cfg.MODEL.NUM_CLASSES = 151
cfg.MODEL.NUM_PRD_CLASSES = 50 # exclude background
elif args.dataset == "vg":
cfg.TEST.DATASETS = ('vg_val',)
cfg.MODEL.NUM_CLASSES = 151
cfg.MODEL.NUM_PRD_CLASSES = 50 # exclude background
elif args.dataset == "vrd_train":
cfg.TEST.DATASETS = ('vrd_train',)
cfg.MODEL.NUM_CLASSES = 101
cfg.MODEL.NUM_PRD_CLASSES = 70 # exclude background
elif args.dataset == "vrd":
cfg.TEST.DATASETS = ('vrd_val',)
cfg.MODEL.NUM_CLASSES = 101
cfg.MODEL.NUM_PRD_CLASSES = 70 # exclude background
else: # For subprocess call
assert cfg.TEST.DATASETS, 'cfg.TEST.DATASETS shouldn\'t be empty'
assert_and_infer_cfg()
if not cfg.MODEL.RUN_BASELINE:
assert bool(args.load_ckpt) ^ bool(args.load_detectron), \
'Exactly one of --load_ckpt and --load_detectron should be specified.'
if args.output_dir is None:
ckpt_path = args.load_ckpt if args.load_ckpt else args.load_detectron
args.output_dir = os.path.join(
os.path.dirname(os.path.dirname(ckpt_path)), 'test')
logger.info('Automatically set output directory to %s', args.output_dir)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
logger.info('Testing with config:')
logger.info(pprint.pformat(cfg))
# For test_engine.multi_gpu_test_net_on_dataset
args.test_net_file, _ = os.path.splitext(__file__)
# manually set args.cuda
args.cuda = True
if args.use_gt_boxes:
if args.use_gt_labels:
det_file = os.path.join(args.output_dir, 'rel_detections_gt_boxes_prdcls.pkl')
else:
det_file = os.path.join(args.output_dir, 'rel_detections_gt_boxes_sgcls.pkl')
else:
det_file = os.path.join(args.output_dir, 'rel_detections.pkl')
if os.path.exists(det_file):
logger.info('Loading results from {}'.format(det_file))
with open(det_file, 'rb') as f:
all_results = pickle.load(f)
logger.info('Starting evaluation now...')
if args.dataset.find('vg') >= 0 or args.dataset.find('vrd') >= 0:
task_evaluation_vg_and_vrd.eval_rel_results(all_results, args.output_dir, args.do_val)
else:
task_evaluation_sg.eval_rel_results(all_results, args.output_dir, args.do_val, args.do_vis, args.do_special)
else:
run_inference(
args,
ind_range=args.range,
multi_gpu_testing=args.multi_gpu_testing,
check_expected_results=True)
| ContrastiveLosses4VRD-master | tools/test_net_rel.py |
# Based on Detectron.pytorch/tools/_init_paths.py by Roy Tseng
# modified for this project by Ji Zhang
"""Add {PROJECT_ROOT}/lib. to PYTHONPATH
Usage:
import this module before import any modules under lib/
e.g
import _init_paths
from core.config import cfg
"""
import os.path as osp
import sys
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path)
this_dir = osp.abspath(osp.dirname(osp.dirname(__file__)))
# add Detectron.PyTorch/lib
detectron_path = osp.join(this_dir, 'Detectron_pytorch', 'lib')
add_path(detectron_path)
# Add lib to PYTHONPATH
lib_path = osp.join(this_dir, 'lib')
add_path(lib_path)
| ContrastiveLosses4VRD-master | tools/_init_paths.py |
#!/usr/bin/env python
# coding: utf-8
# In[23]:
import json
import numpy as np
import os
from PIL import Image
from tqdm import tqdm
import copy
from shutil import copyfile
# take the images from the sg_dataset folder and rename them
# Also converts the gif and png images into jpg
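# Illustrative example (hypothetical filename): with cnt == 1, an input image
# "1234567890_abcdef_b.png" is re-encoded as RGB and written out as
# "000000000001.jpg", and the pair {"1234567890_abcdef_b.png": 1} is recorded
# in the <out_split>_fname_mapping.json file.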
def process_vrd_split(in_split, out_split):
vrd_dir = 'data/vrd/sg_dataset/sg_' + in_split + '_images/'
new_dir = 'data/vrd/'+ out_split + '_images/'
os.mkdir(new_dir)
cnt = 1
name_map = {}
for f in tqdm(sorted(os.listdir(vrd_dir))):
# for f in os.listdir(vrd_dir):
        ext = f.split('.')[1]
        new_name = new_dir + '{:012d}'.format(cnt) + '.jpg'
        if ext.find('png') >= 0 or ext.find('gif') >= 0:
            img = Image.open(vrd_dir + f).convert('RGB')
            img.save(new_name)
        else:
            copyfile(vrd_dir + f, new_name)
name_map[f] = cnt
cnt += 1
print(len(name_map))
# store the filename mappings here
name_map_fname = 'data/vrd/%s_fname_mapping.json' %(out_split)
with open(name_map_fname, 'w') as f:
json.dump(name_map, f, sort_keys=True, indent=4)
f.close()
# load the original annotations
with open('data/vrd/annotations_' + in_split + '.json', 'r') as f:
vrd_anns = json.load(f)
f.close()
new_anns = {}
for k, v in tqdm(vrd_anns.items()):
# apparently this gif file has been renamed in the original annotations
if k == '4392556686_44d71ff5a0_o.jpg':
k = '4392556686_44d71ff5a0_o.gif'
new_k = '{:012d}'.format(name_map[k]) + '.jpg'
new_anns[new_k] = v
# create the new annotations
with open('data/vrd/new_annotations_' + out_split + '.json', 'w') as outfile:
json.dump(new_anns, outfile)
if __name__ == '__main__':
# using the test split as our val. We won't have a true test split for VRD
process_vrd_split('test', 'val')
process_vrd_split('train', 'train')
| ContrastiveLosses4VRD-master | tools/rename_vrd_with_numbers.py |
# Adapted by Ji Zhang, 2019
#
# Based on Detectron.pytorch/tools/train_net.py Written by Roy Tseng
""" Training script for steps_with_decay policy"""
import argparse
import os
import sys
import pickle
import resource
import traceback
import logging
from collections import defaultdict
import numpy as np
import yaml
import torch
from torch.autograd import Variable
import torch.nn as nn
import cv2
cv2.setNumThreads(0) # pytorch issue 1355: possible deadlock in dataloader
import _init_paths # pylint: disable=unused-import
import nn as mynn
import utils_rel.net_rel as net_utils_rel
import utils.misc as misc_utils
from core.config import cfg, cfg_from_file, cfg_from_list, assert_and_infer_cfg
from datasets_rel.roidb_rel import combined_roidb_for_training
from roi_data_rel.loader_rel import RoiDataLoader, MinibatchSampler, BatchSampler, collate_minibatch
from modeling_rel.model_builder_rel import Generalized_RCNN
from utils.detectron_weight_helper import load_detectron_weight
from utils.logging import setup_logging
from utils.timer import Timer
from utils_rel.training_stats_rel import TrainingStats
# Set up logging and load config options
logger = setup_logging(__name__)
logging.getLogger('roi_data.loader').setLevel(logging.INFO)
# RuntimeError: received 0 items of ancdata. Issue: pytorch/pytorch#973
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))
def parse_args():
"""Parse input arguments"""
    parser = argparse.ArgumentParser(description='Train an X-RCNN network')
parser.add_argument(
'--dataset', dest='dataset', required=True,
help='Dataset to use')
parser.add_argument(
'--cfg', dest='cfg_file', required=True,
help='Config file for training (and optionally testing)')
parser.add_argument(
'--set', dest='set_cfgs',
        help='Set config keys. Key-value sequence separated by whitespace. '
'e.g. [key] [value] [key] [value]',
default=[], nargs='+')
parser.add_argument(
'--disp_interval',
help='Display training info every N iterations',
default=20, type=int)
parser.add_argument(
'--no_cuda', dest='cuda', help='Do not use CUDA device', action='store_false')
# Optimization
    # These options have the highest priority and can overwrite the values in the config file
# or values set by set_cfgs. `None` means do not overwrite.
parser.add_argument(
'--bs', dest='batch_size',
        help='Explicitly specify to overwrite the value that comes from the cfg_file.',
type=int)
parser.add_argument(
'--nw', dest='num_workers',
help='Explicitly specify to overwrite number of workers to load data. Defaults to 4',
type=int)
parser.add_argument(
'--iter_size',
help='Update once every iter_size steps, as in Caffe.',
default=1, type=int)
parser.add_argument(
'--o', dest='optimizer', help='Training optimizer.',
default=None)
parser.add_argument(
'--lr', help='Base learning rate.',
default=None, type=float)
parser.add_argument(
'--lr_decay_gamma',
help='Learning rate decay rate.',
default=None, type=float)
# Epoch
parser.add_argument(
'--start_step',
help='Starting step count for training epoch. 0-indexed.',
default=0, type=int)
# Resume training: requires same iterations per epoch
parser.add_argument(
'--resume',
        help='resume training from a checkpoint',
action='store_true')
parser.add_argument(
'--no_save', help='do not save anything', action='store_true')
parser.add_argument(
'--load_ckpt', help='checkpoint path to load')
parser.add_argument(
'--load_detectron', help='path to the detectron weight pickle file')
parser.add_argument(
'--use_tfboard', help='Use tensorflow tensorboard to log training info',
action='store_true')
return parser.parse_args()
def save_ckpt(output_dir, args, step, train_size, model, optimizer):
"""Save checkpoint"""
if args.no_save:
return
ckpt_dir = os.path.join(output_dir, 'ckpt')
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir)
save_name = os.path.join(ckpt_dir, 'model_step{}.pth'.format(step))
if isinstance(model, mynn.DataParallel):
model = model.module
model_state_dict = model.state_dict()
torch.save({
'step': step,
'train_size': train_size,
'batch_size': args.batch_size,
        'model': model_state_dict,
'optimizer': optimizer.state_dict()}, save_name)
logger.info('save model: %s', save_name)
def main():
"""Main function"""
args = parse_args()
print('Called with args:')
print(args)
if not torch.cuda.is_available():
sys.exit("Need a CUDA device to run the code.")
if args.cuda or cfg.NUM_GPUS > 0:
cfg.CUDA = True
else:
raise ValueError("Need Cuda device to run !")
if args.dataset == "vrd":
cfg.TRAIN.DATASETS = ('vrd_train',)
cfg.MODEL.NUM_CLASSES = 101
cfg.MODEL.NUM_PRD_CLASSES = 70 # exclude background
elif args.dataset == "vg_mini":
cfg.TRAIN.DATASETS = ('vg_train_mini',)
cfg.MODEL.NUM_CLASSES = 151
cfg.MODEL.NUM_PRD_CLASSES = 50 # exclude background
elif args.dataset == "vg":
cfg.TRAIN.DATASETS = ('vg_train',)
cfg.MODEL.NUM_CLASSES = 151
cfg.MODEL.NUM_PRD_CLASSES = 50 # exclude background
elif args.dataset == "oi_rel":
cfg.TRAIN.DATASETS = ('oi_rel_train',)
# cfg.MODEL.NUM_CLASSES = 62
cfg.MODEL.NUM_CLASSES = 58
cfg.MODEL.NUM_PRD_CLASSES = 9 # rel, exclude background
elif args.dataset == "oi_rel_mini":
cfg.TRAIN.DATASETS = ('oi_rel_train_mini',)
# cfg.MODEL.NUM_CLASSES = 62
cfg.MODEL.NUM_CLASSES = 58
cfg.MODEL.NUM_PRD_CLASSES = 9 # rel, exclude background
else:
raise ValueError("Unexpected args.dataset: {}".format(args.dataset))
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
### Adaptively adjust some configs ###
cfg.NUM_GPUS = torch.cuda.device_count()
original_batch_size = cfg.NUM_GPUS * cfg.TRAIN.IMS_PER_BATCH
original_ims_per_batch = cfg.TRAIN.IMS_PER_BATCH
original_num_gpus = cfg.NUM_GPUS
if args.batch_size is None:
args.batch_size = original_batch_size
assert (args.batch_size % cfg.NUM_GPUS) == 0, \
'batch_size: %d, NUM_GPUS: %d' % (args.batch_size, cfg.NUM_GPUS)
cfg.TRAIN.IMS_PER_BATCH = args.batch_size // cfg.NUM_GPUS
effective_batch_size = args.iter_size * args.batch_size
print('effective_batch_size = batch_size * iter_size = %d * %d' % (args.batch_size, args.iter_size))
print('Adaptive config changes:')
print(' effective_batch_size: %d --> %d' % (original_batch_size, effective_batch_size))
print(' NUM_GPUS: %d --> %d' % (original_num_gpus, cfg.NUM_GPUS))
print(' IMS_PER_BATCH: %d --> %d' % (original_ims_per_batch, cfg.TRAIN.IMS_PER_BATCH))
### Adjust learning based on batch size change linearly
# For iter_size > 1, gradients are `accumulated`, so lr is scaled based
# on batch_size instead of effective_batch_size
old_base_lr = cfg.SOLVER.BASE_LR
cfg.SOLVER.BASE_LR *= args.batch_size / original_batch_size
print('Adjust BASE_LR linearly according to batch_size change:\n'
' BASE_LR: {} --> {}'.format(old_base_lr, cfg.SOLVER.BASE_LR))
### Adjust solver steps
step_scale = original_batch_size / effective_batch_size
old_solver_steps = cfg.SOLVER.STEPS
old_max_iter = cfg.SOLVER.MAX_ITER
cfg.SOLVER.STEPS = list(map(lambda x: int(x * step_scale + 0.5), cfg.SOLVER.STEPS))
cfg.SOLVER.MAX_ITER = int(cfg.SOLVER.MAX_ITER * step_scale + 0.5)
print('Adjust SOLVER.STEPS and SOLVER.MAX_ITER linearly based on effective_batch_size change:\n'
' SOLVER.STEPS: {} --> {}\n'
' SOLVER.MAX_ITER: {} --> {}'.format(old_solver_steps, cfg.SOLVER.STEPS,
old_max_iter, cfg.SOLVER.MAX_ITER))
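    # Worked example (illustrative numbers): with original_batch_size 16,
    # --bs 8 and --iter_size 1, step_scale is 2.0, so STEPS of (83333, 111111)
    # become [166666, 222222] and a MAX_ITER of 125000 becomes 250000.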
# Scale FPN rpn_proposals collect size (post_nms_topN) in `collect` function
# of `collect_and_distribute_fpn_rpn_proposals.py`
#
# post_nms_topN = int(cfg[cfg_key].RPN_POST_NMS_TOP_N * cfg.FPN.RPN_COLLECT_SCALE + 0.5)
if cfg.FPN.FPN_ON and cfg.MODEL.FASTER_RCNN:
cfg.FPN.RPN_COLLECT_SCALE = cfg.TRAIN.IMS_PER_BATCH / original_ims_per_batch
        print('Scale FPN rpn_proposals collect size directly proportional to the change of IMS_PER_BATCH:\n'
' cfg.FPN.RPN_COLLECT_SCALE: {}'.format(cfg.FPN.RPN_COLLECT_SCALE))
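        # Worked example (illustrative numbers): if IMS_PER_BATCH drops from 2
        # to 1, RPN_COLLECT_SCALE is 0.5 and the collected post-NMS proposals
        # go from RPN_POST_NMS_TOP_N = 2000 to int(2000 * 0.5 + 0.5) = 1000.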
if args.num_workers is not None:
cfg.DATA_LOADER.NUM_THREADS = args.num_workers
print('Number of data loading threads: %d' % cfg.DATA_LOADER.NUM_THREADS)
### Overwrite some solver settings from command line arguments
if args.optimizer is not None:
cfg.SOLVER.TYPE = args.optimizer
if args.lr is not None:
cfg.SOLVER.BASE_LR = args.lr
if args.lr_decay_gamma is not None:
cfg.SOLVER.GAMMA = args.lr_decay_gamma
assert_and_infer_cfg()
timers = defaultdict(Timer)
### Dataset ###
timers['roidb'].tic()
roidb, ratio_list, ratio_index = combined_roidb_for_training(
cfg.TRAIN.DATASETS, cfg.TRAIN.PROPOSAL_FILES)
timers['roidb'].toc()
roidb_size = len(roidb)
logger.info('{:d} roidb entries'.format(roidb_size))
logger.info('Takes %.2f sec(s) to construct roidb', timers['roidb'].average_time)
# Effective training sample size for one epoch
train_size = roidb_size // args.batch_size * args.batch_size
batchSampler = BatchSampler(
sampler=MinibatchSampler(ratio_list, ratio_index),
batch_size=args.batch_size,
drop_last=True
)
dataset = RoiDataLoader(
roidb,
cfg.MODEL.NUM_CLASSES,
training=True)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_sampler=batchSampler,
num_workers=cfg.DATA_LOADER.NUM_THREADS,
collate_fn=collate_minibatch)
dataiterator = iter(dataloader)
### Model ###
maskRCNN = Generalized_RCNN()
if cfg.CUDA:
maskRCNN.cuda()
### Optimizer ###
# record backbone params, i.e., conv_body and box_head params
gn_params = []
backbone_bias_params = []
backbone_bias_param_names = []
prd_branch_bias_params = []
prd_branch_bias_param_names = []
backbone_nonbias_params = []
backbone_nonbias_param_names = []
prd_branch_nonbias_params = []
prd_branch_nonbias_param_names = []
for key, value in dict(maskRCNN.named_parameters()).items():
if value.requires_grad:
if 'gn' in key:
gn_params.append(value)
elif 'Conv_Body' in key or 'Box_Head' in key or 'Box_Outs' in key or 'RPN' in key:
if 'bias' in key:
backbone_bias_params.append(value)
backbone_bias_param_names.append(key)
else:
backbone_nonbias_params.append(value)
backbone_nonbias_param_names.append(key)
else:
if 'bias' in key:
prd_branch_bias_params.append(value)
prd_branch_bias_param_names.append(key)
else:
prd_branch_nonbias_params.append(value)
prd_branch_nonbias_param_names.append(key)
# Learning rate of 0 is a dummy value to be set properly at the start of training
params = [
{'params': backbone_nonbias_params,
'lr': 0,
'weight_decay': cfg.SOLVER.WEIGHT_DECAY},
{'params': backbone_bias_params,
'lr': 0 * (cfg.SOLVER.BIAS_DOUBLE_LR + 1),
'weight_decay': cfg.SOLVER.WEIGHT_DECAY if cfg.SOLVER.BIAS_WEIGHT_DECAY else 0},
{'params': prd_branch_nonbias_params,
'lr': 0,
'weight_decay': cfg.SOLVER.WEIGHT_DECAY},
{'params': prd_branch_bias_params,
'lr': 0 * (cfg.SOLVER.BIAS_DOUBLE_LR + 1),
'weight_decay': cfg.SOLVER.WEIGHT_DECAY if cfg.SOLVER.BIAS_WEIGHT_DECAY else 0},
{'params': gn_params,
'lr': 0,
'weight_decay': cfg.SOLVER.WEIGHT_DECAY_GN}
]
if cfg.SOLVER.TYPE == "SGD":
optimizer = torch.optim.SGD(params, momentum=cfg.SOLVER.MOMENTUM)
elif cfg.SOLVER.TYPE == "Adam":
optimizer = torch.optim.Adam(params)
### Load checkpoint
if args.load_ckpt:
load_name = args.load_ckpt
logging.info("loading checkpoint %s", load_name)
checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)
net_utils_rel.load_ckpt_rel(maskRCNN, checkpoint['model'])
if args.resume:
args.start_step = checkpoint['step'] + 1
if 'train_size' in checkpoint: # For backward compatibility
if checkpoint['train_size'] != train_size:
print('train_size value: %d different from the one in checkpoint: %d'
% (train_size, checkpoint['train_size']))
# reorder the params in optimizer checkpoint's params_groups if needed
# misc_utils.ensure_optimizer_ckpt_params_order(param_names, checkpoint)
# There is a bug in optimizer.load_state_dict on Pytorch 0.3.1.
# However it's fixed on master.
# optimizer.load_state_dict(checkpoint['optimizer'])
misc_utils.load_optimizer_state_dict(optimizer, checkpoint['optimizer'])
del checkpoint
torch.cuda.empty_cache()
if args.load_detectron: #TODO resume for detectron weights (load sgd momentum values)
logging.info("loading Detectron weights %s", args.load_detectron)
load_detectron_weight(maskRCNN, args.load_detectron)
    # lr = optimizer.param_groups[0]['lr'] # lr of non-bias parameters, for command line outputs.
    lr = optimizer.param_groups[2]['lr'] # lr of non-backbone parameters, for command line outputs.
    backbone_lr = optimizer.param_groups[0]['lr'] # lr of backbone parameters, for command line outputs.
maskRCNN = mynn.DataParallel(maskRCNN, cpu_keywords=['im_info', 'roidb'],
minibatch=True)
### Training Setups ###
args.run_name = misc_utils.get_run_name() + '_step_with_prd_cls_v' + str(cfg.MODEL.SUBTYPE)
output_dir = misc_utils.get_output_dir(args, args.run_name)
args.cfg_filename = os.path.basename(args.cfg_file)
if not args.no_save:
if not os.path.exists(output_dir):
os.makedirs(output_dir)
blob = {'cfg': yaml.dump(cfg), 'args': args}
with open(os.path.join(output_dir, 'config_and_args.pkl'), 'wb') as f:
pickle.dump(blob, f, pickle.HIGHEST_PROTOCOL)
if args.use_tfboard:
from tensorboardX import SummaryWriter
# Set the Tensorboard logger
tblogger = SummaryWriter(output_dir)
### Training Loop ###
maskRCNN.train()
# CHECKPOINT_PERIOD = int(cfg.TRAIN.SNAPSHOT_ITERS / cfg.NUM_GPUS)
    CHECKPOINT_PERIOD = int(cfg.SOLVER.MAX_ITER / cfg.TRAIN.SNAPSHOT_FREQ)
# Set index for decay steps
decay_steps_ind = None
for i in range(1, len(cfg.SOLVER.STEPS)):
if cfg.SOLVER.STEPS[i] >= args.start_step:
decay_steps_ind = i
break
if decay_steps_ind is None:
decay_steps_ind = len(cfg.SOLVER.STEPS)
training_stats = TrainingStats(
args,
args.disp_interval,
tblogger if args.use_tfboard and not args.no_save else None)
try:
logger.info('Training starts !')
step = args.start_step
for step in range(args.start_step, cfg.SOLVER.MAX_ITER):
# Warm up
if step < cfg.SOLVER.WARM_UP_ITERS:
method = cfg.SOLVER.WARM_UP_METHOD
if method == 'constant':
warmup_factor = cfg.SOLVER.WARM_UP_FACTOR
elif method == 'linear':
alpha = step / cfg.SOLVER.WARM_UP_ITERS
warmup_factor = cfg.SOLVER.WARM_UP_FACTOR * (1 - alpha) + alpha
else:
raise KeyError('Unknown SOLVER.WARM_UP_METHOD: {}'.format(method))
lr_new = cfg.SOLVER.BASE_LR * warmup_factor
net_utils_rel.update_learning_rate_rel(optimizer, lr, lr_new)
# lr = optimizer.param_groups[0]['lr']
lr = optimizer.param_groups[2]['lr']
backbone_lr = optimizer.param_groups[0]['lr']
assert lr == lr_new
elif step == cfg.SOLVER.WARM_UP_ITERS:
net_utils_rel.update_learning_rate_rel(optimizer, lr, cfg.SOLVER.BASE_LR)
# lr = optimizer.param_groups[0]['lr']
lr = optimizer.param_groups[2]['lr']
backbone_lr = optimizer.param_groups[0]['lr']
assert lr == cfg.SOLVER.BASE_LR
# Learning rate decay
if decay_steps_ind < len(cfg.SOLVER.STEPS) and \
step == cfg.SOLVER.STEPS[decay_steps_ind]:
logger.info('Decay the learning on step %d', step)
lr_new = lr * cfg.SOLVER.GAMMA
net_utils_rel.update_learning_rate_rel(optimizer, lr, lr_new)
# lr = optimizer.param_groups[0]['lr']
lr = optimizer.param_groups[2]['lr']
backbone_lr = optimizer.param_groups[0]['lr']
assert lr == lr_new
decay_steps_ind += 1
training_stats.IterTic()
optimizer.zero_grad()
for inner_iter in range(args.iter_size):
try:
input_data = next(dataiterator)
except StopIteration:
dataiterator = iter(dataloader)
input_data = next(dataiterator)
for key in input_data:
if key != 'roidb': # roidb is a list of ndarrays with inconsistent length
input_data[key] = list(map(Variable, input_data[key]))
net_outputs = maskRCNN(**input_data)
training_stats.UpdateIterStats(net_outputs, inner_iter)
loss = net_outputs['total_loss']
loss.backward()
optimizer.step()
training_stats.IterToc()
training_stats.LogIterStats(step, lr, backbone_lr)
if (step+1) % CHECKPOINT_PERIOD == 0:
save_ckpt(output_dir, args, step, train_size, maskRCNN, optimizer)
# ---- Training ends ----
# Save last checkpoint
save_ckpt(output_dir, args, step, train_size, maskRCNN, optimizer)
except (RuntimeError, KeyboardInterrupt):
del dataiterator
logger.info('Save ckpt on exception ...')
save_ckpt(output_dir, args, step, train_size, maskRCNN, optimizer)
logger.info('Save ckpt done.')
stack_trace = traceback.format_exc()
print(stack_trace)
finally:
if args.use_tfboard and not args.no_save:
tblogger.close()
if __name__ == '__main__':
main()
| ContrastiveLosses4VRD-master | tools/train_net_step_rel.py |
# Based on:
# Detectron.pytorch/lib/setup.py
# and modified for this project
# Original source license text:
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
from __future__ import print_function
from Cython.Build import cythonize
from Cython.Distutils import build_ext
from setuptools import Extension
from setuptools import setup
import numpy as np
# Obtain the numpy include directory. This logic works across numpy versions.
try:
numpy_include = np.get_include()
except AttributeError:
numpy_include = np.get_numpy_include()
ext_modules = [
Extension(
name='utils_rel.cython_bbox_rel',
sources=['utils_rel/cython_bbox_rel.pyx'],
extra_compile_args=['-Wno-cpp'],
include_dirs=[numpy_include]
)
]
setup(
name='mask_rcnn_rel',
ext_modules=cythonize(ext_modules)
)
| ContrastiveLosses4VRD-master | lib/setup.py |
# Adapted by Ji Zhang, 2019
# from Detectron.pytorch/lib/core/test_engine.py
# Original license text below
# --------------------------------------------------------
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Test a Detectron network on an imdb (image database)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict
import cv2
import datetime
import logging
import numpy as np
from numpy import linalg as la
import os
import yaml
import json
from six.moves import cPickle as pickle
import torch
import nn as mynn
from torch.autograd import Variable
from core.config import cfg
from core.test_rel import im_detect_rels
from datasets_rel import task_evaluation_sg as task_evaluation_sg
from datasets_rel import task_evaluation_vg_and_vrd as task_evaluation_vg_and_vrd
from datasets_rel.json_dataset_rel import JsonDatasetRel
from modeling_rel import model_builder_rel
from utils.detectron_weight_helper import load_detectron_weight
import utils.env as envu
import utils_rel.net_rel as net_utils_rel
import utils_rel.subprocess_rel as subprocess_utils
import utils.vis as vis_utils
from utils.io import save_object
from utils.timer import Timer
logger = logging.getLogger(__name__)
def get_eval_functions():
# Determine which parent or child function should handle inference
# Generic case that handles all network types other than RPN-only nets
# and RetinaNet
child_func = test_net
parent_func = test_net_on_dataset
return parent_func, child_func
def get_inference_dataset(index, is_parent=True):
assert is_parent or len(cfg.TEST.DATASETS) == 1, \
'The child inference process can only work on a single dataset'
dataset_name = cfg.TEST.DATASETS[index]
proposal_file = None
return dataset_name, proposal_file
def run_inference(
args, ind_range=None,
multi_gpu_testing=False, gpu_id=0,
check_expected_results=False):
parent_func, child_func = get_eval_functions()
is_parent = ind_range is None
def result_getter():
if is_parent:
# Parent case:
# In this case we're either running inference on the entire dataset in a
# single process or (if multi_gpu_testing is True) using this process to
# launch subprocesses that each run inference on a range of the dataset
all_results = []
for i in range(len(cfg.TEST.DATASETS)):
dataset_name, proposal_file = get_inference_dataset(i)
output_dir = args.output_dir
results = parent_func(
args,
dataset_name,
proposal_file,
output_dir,
multi_gpu=multi_gpu_testing
)
all_results.append(results)
return all_results
else:
# Subprocess child case:
# In this case test_net was called via subprocess.Popen to execute on a
# range of inputs on a single dataset
dataset_name, proposal_file = get_inference_dataset(0, is_parent=False)
output_dir = args.output_dir
return child_func(
args,
dataset_name,
proposal_file,
output_dir,
ind_range=ind_range,
gpu_id=gpu_id
)
all_results = result_getter()
return all_results
def test_net_on_dataset(
args,
dataset_name,
proposal_file,
output_dir,
multi_gpu=False,
gpu_id=0):
"""Run inference on a dataset."""
dataset = JsonDatasetRel(dataset_name)
test_timer = Timer()
test_timer.tic()
if multi_gpu:
num_images = len(dataset.get_roidb(gt=args.do_val))
all_results = multi_gpu_test_net_on_dataset(
args, dataset_name, proposal_file, num_images, output_dir
)
else:
all_results = test_net(
args, dataset_name, proposal_file, output_dir, gpu_id=gpu_id
)
test_timer.toc()
logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))
logger.info('Starting evaluation now...')
if dataset_name.find('vg') >= 0 or dataset_name.find('vrd') >= 0:
task_evaluation_vg_and_vrd.eval_rel_results(all_results, output_dir, args.do_val)
else:
task_evaluation_sg.eval_rel_results(all_results, output_dir, args.do_val, args.do_vis, args.do_special)
return all_results
def multi_gpu_test_net_on_dataset(
args, dataset_name, proposal_file, num_images, output_dir):
"""Multi-gpu inference on a dataset."""
binary_dir = envu.get_runtime_dir()
binary_ext = envu.get_py_bin_ext()
binary = os.path.join(binary_dir, args.test_net_file + binary_ext)
assert os.path.exists(binary), 'Binary \'{}\' not found'.format(binary)
# Pass the target dataset and proposal file (if any) via the command line
opts = ['TEST.DATASETS', '("{}",)'.format(dataset_name)]
if proposal_file:
opts += ['TEST.PROPOSAL_FILES', '("{}",)'.format(proposal_file)]
if args.do_val:
opts += ['--do_val']
if args.do_vis:
opts += ['--do_vis']
if args.do_special:
opts += ['--do_special']
if args.use_gt_boxes:
opts += ['--use_gt_boxes']
if args.use_gt_labels:
opts += ['--use_gt_labels']
# Run inference in parallel in subprocesses
# Outputs will be a list of outputs from each subprocess, where the output
# of each subprocess is the dictionary saved by test_net().
outputs = subprocess_utils.process_in_parallel(
'rel_detection', num_images, binary, output_dir,
args.load_ckpt, args.load_detectron, opts
)
# Collate the results from each subprocess
all_results = []
for det_data in outputs:
all_results += det_data
if args.use_gt_boxes:
if args.use_gt_labels:
det_file = os.path.join(args.output_dir, 'rel_detections_gt_boxes_prdcls.pkl')
else:
det_file = os.path.join(args.output_dir, 'rel_detections_gt_boxes_sgcls.pkl')
else:
det_file = os.path.join(args.output_dir, 'rel_detections.pkl')
save_object(all_results, det_file)
logger.info('Wrote rel_detections to: {}'.format(os.path.abspath(det_file)))
return all_results
def test_net(
args,
dataset_name,
proposal_file,
output_dir,
ind_range=None,
gpu_id=0):
"""Run inference on all images in a dataset or over an index range of images
in a dataset using a single GPU.
"""
assert not cfg.MODEL.RPN_ONLY, \
'Use rpn_generate to generate proposals from RPN-only models'
roidb, dataset, start_ind, end_ind, total_num_images = get_roidb_and_dataset(
dataset_name, proposal_file, ind_range, args.do_val
)
model = initialize_model_from_cfg(args, gpu_id=gpu_id)
num_images = len(roidb)
all_results = [None for _ in range(num_images)]
timers = defaultdict(Timer)
for i, entry in enumerate(roidb):
box_proposals = None
im = cv2.imread(entry['image'])
if args.use_gt_boxes:
im_results = im_detect_rels(model, im, dataset_name, box_proposals, args.do_vis, timers, entry, args.use_gt_labels)
else:
im_results = im_detect_rels(model, im, dataset_name, box_proposals, args.do_vis, timers)
im_results.update(dict(image=entry['image']))
# add gt
if args.do_val:
im_results.update(
dict(gt_sbj_boxes=entry['sbj_gt_boxes'],
gt_sbj_labels=entry['sbj_gt_classes'],
gt_obj_boxes=entry['obj_gt_boxes'],
gt_obj_labels=entry['obj_gt_classes'],
gt_prd_labels=entry['prd_gt_classes']))
all_results[i] = im_results
if i % 10 == 0: # Reduce log file size
ave_total_time = np.sum([t.average_time for t in timers.values()])
eta_seconds = ave_total_time * (num_images - i - 1)
eta = str(datetime.timedelta(seconds=int(eta_seconds)))
det_time = (timers['im_detect_rels'].average_time)
logger.info((
'im_detect: range [{:d}, {:d}] of {:d}: '
'{:d}/{:d} {:.3f}s (eta: {})').format(
start_ind + 1, end_ind, total_num_images, start_ind + i + 1,
start_ind + num_images, det_time, eta))
cfg_yaml = yaml.dump(cfg)
if ind_range is not None:
det_name = 'rel_detection_range_%s_%s.pkl' % tuple(ind_range)
else:
if args.use_gt_boxes:
if args.use_gt_labels:
det_name = 'rel_detections_gt_boxes_prdcls.pkl'
else:
det_name = 'rel_detections_gt_boxes_sgcls.pkl'
else:
det_name = 'rel_detections.pkl'
det_file = os.path.join(output_dir, det_name)
save_object(all_results, det_file)
logger.info('Wrote rel_detections to: {}'.format(os.path.abspath(det_file)))
return all_results
def initialize_model_from_cfg(args, gpu_id=0):
"""Initialize a model from the global cfg. Loads test-time weights and
set to evaluation mode.
"""
model = model_builder_rel.Generalized_RCNN()
model.eval()
if args.cuda:
model.cuda()
if args.load_ckpt:
load_name = args.load_ckpt
logger.info("loading checkpoint %s", load_name)
checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)
net_utils_rel.load_ckpt_rel(model, checkpoint['model'])
if args.load_detectron:
logger.info("loading detectron weights %s", args.load_detectron)
load_detectron_weight(model, args.load_detectron)
model = mynn.DataParallel(model, cpu_keywords=['im_info', 'roidb'], minibatch=True)
return model
def get_roidb_and_dataset(dataset_name, proposal_file, ind_range, do_val=True):
"""Get the roidb for the dataset specified in the global cfg. Optionally
restrict it to a range of indices if ind_range is a pair of integers.
"""
dataset = JsonDatasetRel(dataset_name)
roidb = dataset.get_roidb(gt=do_val)
if ind_range is not None:
total_num_images = len(roidb)
start, end = ind_range
roidb = roidb[start:end]
else:
start = 0
end = len(roidb)
total_num_images = end
return roidb, dataset, start, end, total_num_images
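# Usage sketch (illustrative): ind_range=(100, 200) restricts inference to
# roidb entries 100..199 while total_num_images still reports the size of the
# full roidb, so the parent process can stitch the per-range results together.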
| ContrastiveLosses4VRD-master | lib/core/test_engine_rel.py |
# Based on Detectron.pytorch/lib/core/config.py
# --------------------------------------------------------
# Detectron.pytorch
# Licensed under The MIT License [see LICENSE for details]
# Written by roytseng-tw
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import six
import os
import os.path as osp
import copy
from ast import literal_eval
import numpy as np
from packaging import version
import torch
import torch.nn as nn
from torch.nn import init
import yaml
import _init_paths
import nn as mynn
from utils.collections import AttrDict
__C = AttrDict()
# Consumers can get config by:
# from fast_rcnn_config import cfg
cfg = __C
# Random note: avoid using '.ON' as a config key since yaml converts it to True;
# prefer 'ENABLED' instead
# ---------------------------------------------------------------------------- #
# Training options
# ---------------------------------------------------------------------------- #
__C.TRAIN = AttrDict()
# Datasets to train on
# Available dataset list: datasets.dataset_catalog.DATASETS.keys()
# If multiple datasets are listed, the model is trained on their union
__C.TRAIN.DATASETS = ()
# Scales to use during training
# Each scale is the pixel size of an image's shortest side
# If multiple scales are listed, then one is selected uniformly at random for
# each training image (i.e., scale jitter data augmentation)
__C.TRAIN.SCALES = (600, )
# Max pixel size of the longest side of a scaled input image
__C.TRAIN.MAX_SIZE = 1000
# Images *per GPU* in the training minibatch
# Total images per minibatch = TRAIN.IMS_PER_BATCH * NUM_GPUS
__C.TRAIN.IMS_PER_BATCH = 2
# RoI minibatch size *per image* (number of regions of interest [ROIs])
# Total number of RoIs per training minibatch =
# TRAIN.BATCH_SIZE_PER_IM * TRAIN.IMS_PER_BATCH * NUM_GPUS
# E.g., a common configuration is: 512 * 2 * 8 = 8192
__C.TRAIN.BATCH_SIZE_PER_IM = 64
__C.TRAIN.FG_REL_SIZE_PER_IM = 512
# Fraction of minibatch that is labeled foreground (i.e. class > 0)
__C.TRAIN.FG_FRACTION = 0.25
__C.TRAIN.FG_REL_FRACTION = 0.25
# Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)
__C.TRAIN.FG_THRESH = 0.5
# Overlap threshold for a ROI to be considered background (class = 0 if
# overlap in [LO, HI))
__C.TRAIN.BG_THRESH_HI = 0.5
__C.TRAIN.BG_THRESH_LO = 0.0
# Use horizontally-flipped images during training?
__C.TRAIN.USE_FLIPPED = True
# Overlap required between a ROI and ground-truth box in order for that ROI to
# be used as a bounding-box regression training example
__C.TRAIN.BBOX_THRESH = 0.5
# Train using these proposals
# During training, all proposals specified in the file are used (no limit is
# applied)
# Proposal files must be in correspondence with the datasets listed in
# TRAIN.DATASETS
__C.TRAIN.PROPOSAL_FILES = ()
# Snapshot (model checkpoint) period
# Divide by NUM_GPUS to determine actual period (e.g., 20000/8 => 2500 iters)
# to allow for linear training schedule scaling
__C.TRAIN.SNAPSHOT_ITERS = 20000
__C.TRAIN.SNAPSHOT_FREQ = 1
# Normalize the targets (subtract empirical mean, divide by empirical stddev)
__C.TRAIN.BBOX_NORMALIZE_TARGETS = True
# Deprecated (inside weights)
__C.TRAIN.BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Normalize the targets using "precomputed" (or made up) means and stdevs
# (BBOX_NORMALIZE_TARGETS must also be True) (legacy)
__C.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED = False
__C.TRAIN.BBOX_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)
__C.TRAIN.BBOX_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)
# Make minibatches from images that have similar aspect ratios (i.e. both
# tall and thin or both short and wide)
# This feature is critical for saving memory (and makes training slightly
# faster)
__C.TRAIN.ASPECT_GROUPING = True
# Crop images that have too small or too large aspect ratio
__C.TRAIN.ASPECT_CROPPING = False
__C.TRAIN.ASPECT_HI = 2
__C.TRAIN.ASPECT_LO = 0.5
# ---------------------------------------------------------------------------- #
# RPN training options
# ---------------------------------------------------------------------------- #
# Minimum overlap required between an anchor and ground-truth box for the
# (anchor, gt box) pair to be a positive example (IOU >= thresh ==> positive RPN
# example)
__C.TRAIN.RPN_POSITIVE_OVERLAP = 0.7
# Maximum overlap allowed between an anchor and ground-truth box for the
# (anchor, gt box) pair to be a negative example (IOU < thresh ==> negative RPN
# example)
__C.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3
# Target fraction of foreground (positive) examples per RPN minibatch
__C.TRAIN.RPN_FG_FRACTION = 0.5
# Total number of RPN examples per image
__C.TRAIN.RPN_BATCH_SIZE_PER_IM = 256
# NMS threshold used on RPN proposals (used during end-to-end training with RPN)
__C.TRAIN.RPN_NMS_THRESH = 0.7
# Number of top scoring RPN proposals to keep before applying NMS (per image)
# When FPN is used, this is *per FPN level* (not total)
__C.TRAIN.RPN_PRE_NMS_TOP_N = 12000
# Number of top scoring RPN proposals to keep after applying NMS (per image)
# This is the total number of RPN proposals produced (for both FPN and non-FPN
# cases)
__C.TRAIN.RPN_POST_NMS_TOP_N = 2000
# Remove RPN anchors that go outside the image by RPN_STRADDLE_THRESH pixels
# Set to -1 or a large value, e.g. 100000, to disable pruning anchors
__C.TRAIN.RPN_STRADDLE_THRESH = 0
# Proposal height and width both need to be greater than RPN_MIN_SIZE
# (at orig image scale; not scale used during training or inference)
__C.TRAIN.RPN_MIN_SIZE = 0
# Filter proposals that are inside of crowd regions by CROWD_FILTER_THRESH
# "Inside" is measured as: proposal-with-crowd intersection area divided by
# proposal area
__C.TRAIN.CROWD_FILTER_THRESH = 0.7
# Ignore ground-truth objects with area < this threshold
__C.TRAIN.GT_MIN_AREA = -1
# Freeze the backbone architecture during training if set to True
__C.TRAIN.FREEZE_CONV_BODY = False
__C.TRAIN.FREEZE_PRD_CONV_BODY = False
__C.TRAIN.FREEZE_PRD_BOX_HEAD = False
# ---------------------------------------------------------------------------- #
# Data loader options
# ---------------------------------------------------------------------------- #
__C.DATA_LOADER = AttrDict()
# Number of Python threads to use for the data loader (warning: using too many
# threads can cause GIL-based interference with Python Ops leading to *slower*
# training; 4 seems to be the sweet spot in our experience)
__C.DATA_LOADER.NUM_THREADS = 4
# ---------------------------------------------------------------------------- #
# Inference ('test') options
# ---------------------------------------------------------------------------- #
__C.TEST = AttrDict()
# Datasets to test on
# Available dataset list: datasets.dataset_catalog.DATASETS.keys()
# If multiple datasets are listed, testing is performed on each one sequentially
__C.TEST.DATASETS = ()
# Scale to use during testing (can NOT list multiple scales)
# The scale is the pixel size of an image's shortest side
__C.TEST.SCALE = 600
# Max pixel size of the longest side of a scaled input image
__C.TEST.MAX_SIZE = 1000
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
__C.TEST.NMS = 0.3
# Apply Fast R-CNN style bounding-box regression if True
__C.TEST.BBOX_REG = True
# Test using these proposal files (must correspond with TEST.DATASETS)
__C.TEST.PROPOSAL_FILES = ()
# Limit on the number of proposals per image used during inference
__C.TEST.PROPOSAL_LIMIT = 2000
# NMS threshold used on RPN proposals
__C.TEST.RPN_NMS_THRESH = 0.7
# Number of top scoring RPN proposals to keep before applying NMS
# When FPN is used, this is *per FPN level* (not total)
__C.TEST.RPN_PRE_NMS_TOP_N = 12000
# Number of top scoring RPN proposals to keep after applying NMS
# This is the total number of RPN proposals produced (for both FPN and non-FPN
# cases)
__C.TEST.RPN_POST_NMS_TOP_N = 2000
# Proposal height and width both need to be greater than RPN_MIN_SIZE
# (at orig image scale; not scale used during training or inference)
__C.TEST.RPN_MIN_SIZE = 0
# Maximum number of detections to return per image (100 is based on the limit
# established for the COCO dataset)
__C.TEST.DETECTIONS_PER_IM = 100
# Minimum score threshold (assuming scores in a [0, 1] range); a value chosen to
# balance obtaining high recall with not having too many low precision
# detections that will slow down inference post processing steps (like NMS)
__C.TEST.SCORE_THRESH = 0.05
# Used to filter out bad relationships
__C.TEST.SPO_SCORE_THRESH = 0.00001
# __C.TEST.SPO_SCORE_THRESH = 0
__C.TEST.PRD_Ks = (1, 10, 70)
# Save detection results files if True
# If false, results files are cleaned up (they can be large) after local
# evaluation
__C.TEST.COMPETITION_MODE = True
# Evaluate detections with the COCO json dataset eval code even if it's not the
# evaluation code for the dataset (e.g. evaluate PASCAL VOC results using the
# COCO API to get COCO style AP on PASCAL VOC)
__C.TEST.FORCE_JSON_DATASET_EVAL = False
# [Inferred value; do not set directly in a config]
# Indicates if precomputed proposals are used at test time
# Not set for 1-stage models and 2-stage models with RPN subnetwork enabled
__C.TEST.PRECOMPUTED_PROPOSALS = True
# ---------------------------------------------------------------------------- #
# Test-time augmentations for bounding box detection
# See configs/test_time_aug/e2e_mask_rcnn_R-50-FPN_2x.yaml for an example
# ---------------------------------------------------------------------------- #
__C.TEST.BBOX_AUG = AttrDict()
# Enable test-time augmentation for bounding box detection if True
__C.TEST.BBOX_AUG.ENABLED = False
# Heuristic used to combine predicted box scores
# Valid options: ('ID', 'AVG', 'UNION')
__C.TEST.BBOX_AUG.SCORE_HEUR = 'UNION'
# Heuristic used to combine predicted box coordinates
# Valid options: ('ID', 'AVG', 'UNION')
__C.TEST.BBOX_AUG.COORD_HEUR = 'UNION'
# Horizontal flip at the original scale (id transform)
__C.TEST.BBOX_AUG.H_FLIP = False
# Each scale is the pixel size of an image's shortest side
__C.TEST.BBOX_AUG.SCALES = ()
# Max pixel size of the longer side
__C.TEST.BBOX_AUG.MAX_SIZE = 4000
# Horizontal flip at each scale
__C.TEST.BBOX_AUG.SCALE_H_FLIP = False
# Apply scaling based on object size
__C.TEST.BBOX_AUG.SCALE_SIZE_DEP = False
__C.TEST.BBOX_AUG.AREA_TH_LO = 50**2
__C.TEST.BBOX_AUG.AREA_TH_HI = 180**2
# Each aspect ratio is relative to image width
__C.TEST.BBOX_AUG.ASPECT_RATIOS = ()
# Horizontal flip at each aspect ratio
__C.TEST.BBOX_AUG.ASPECT_RATIO_H_FLIP = False
# ---------------------------------------------------------------------------- #
# Test-time augmentations for mask detection
# See configs/test_time_aug/e2e_mask_rcnn_R-50-FPN_2x.yaml for an example
# ---------------------------------------------------------------------------- #
__C.TEST.MASK_AUG = AttrDict()
# Enable test-time augmentation for instance mask detection if True
__C.TEST.MASK_AUG.ENABLED = False
# Heuristic used to combine mask predictions
# SOFT prefix indicates that the computation is performed on soft masks
# Valid options: ('SOFT_AVG', 'SOFT_MAX', 'LOGIT_AVG')
__C.TEST.MASK_AUG.HEUR = 'SOFT_AVG'
# Horizontal flip at the original scale (id transform)
__C.TEST.MASK_AUG.H_FLIP = False
# Each scale is the pixel size of an image's shortest side
__C.TEST.MASK_AUG.SCALES = ()
# Max pixel size of the longer side
__C.TEST.MASK_AUG.MAX_SIZE = 4000
# Horizontal flip at each scale
__C.TEST.MASK_AUG.SCALE_H_FLIP = False
# Apply scaling based on object size
__C.TEST.MASK_AUG.SCALE_SIZE_DEP = False
__C.TEST.MASK_AUG.AREA_TH = 180**2
# Each aspect ratio is relative to image width
__C.TEST.MASK_AUG.ASPECT_RATIOS = ()
# Horizontal flip at each aspect ratio
__C.TEST.MASK_AUG.ASPECT_RATIO_H_FLIP = False
# ---------------------------------------------------------------------------- #
# Test-augmentations for keypoints detection
# configs/test_time_aug/keypoint_rcnn_R-50-FPN_1x.yaml
# ---------------------------------------------------------------------------- #
__C.TEST.KPS_AUG = AttrDict()
# Enable test-time augmentation for keypoint detection if True
__C.TEST.KPS_AUG.ENABLED = False
# Heuristic used to combine keypoint predictions
# Valid options: ('HM_AVG', 'HM_MAX')
__C.TEST.KPS_AUG.HEUR = 'HM_AVG'
# Horizontal flip at the original scale (id transform)
__C.TEST.KPS_AUG.H_FLIP = False
# Each scale is the pixel size of an image's shortest side
__C.TEST.KPS_AUG.SCALES = ()
# Max pixel size of the longer side
__C.TEST.KPS_AUG.MAX_SIZE = 4000
# Horizontal flip at each scale
__C.TEST.KPS_AUG.SCALE_H_FLIP = False
# Apply scaling based on object size
__C.TEST.KPS_AUG.SCALE_SIZE_DEP = False
__C.TEST.KPS_AUG.AREA_TH = 180**2
# Each aspect ratio is relative to image width
__C.TEST.KPS_AUG.ASPECT_RATIOS = ()
# Horizontal flip at each aspect ratio
__C.TEST.KPS_AUG.ASPECT_RATIO_H_FLIP = False
# ---------------------------------------------------------------------------- #
# Soft NMS
# ---------------------------------------------------------------------------- #
__C.TEST.SOFT_NMS = AttrDict()
# Use soft NMS instead of standard NMS if set to True
__C.TEST.SOFT_NMS.ENABLED = False
# See soft NMS paper for definition of these options
__C.TEST.SOFT_NMS.METHOD = 'linear'
__C.TEST.SOFT_NMS.SIGMA = 0.5
# For the soft NMS overlap threshold, we simply use TEST.NMS
# ---------------------------------------------------------------------------- #
# Bounding box voting (from the Multi-Region CNN paper)
# ---------------------------------------------------------------------------- #
__C.TEST.BBOX_VOTE = AttrDict()
# Use box voting if set to True
__C.TEST.BBOX_VOTE.ENABLED = False
# We use TEST.NMS threshold for the NMS step. VOTE_TH overlap threshold
# is used to select voting boxes (IoU >= VOTE_TH) for each box that survives NMS
__C.TEST.BBOX_VOTE.VOTE_TH = 0.8
# The method used to combine scores when doing bounding box voting
# Valid options include ('ID', 'AVG', 'IOU_AVG', 'GENERALIZED_AVG', 'QUASI_SUM')
__C.TEST.BBOX_VOTE.SCORING_METHOD = 'ID'
# Hyperparameter used by the scoring method (it has different meanings for
# different methods)
__C.TEST.BBOX_VOTE.SCORING_METHOD_BETA = 1.0
__C.TEST.USE_GT_BOXES = False
# ---------------------------------------------------------------------------- #
# Model options
# ---------------------------------------------------------------------------- #
__C.MODEL = AttrDict()
# The type of model to use
# The string must match a function in the modeling.model_builder module
# (e.g., 'generalized_rcnn', 'mask_rcnn', ...)
__C.MODEL.TYPE = ''
__C.MODEL.SUBTYPE = 1
__C.MODEL.RUN_BASELINE = False
__C.MODEL.FEAT_LEVEL = 7
__C.MODEL.NO_FC7_RELU = False
__C.MODEL.USE_FREQ_BIAS = False
__C.MODEL.USE_OVLP_FILTER = False
__C.MODEL.USE_SPATIAL_FEAT = False
__C.MODEL.USE_SEM_FEAT = False
__C.MODEL.USE_SIMPLE_P = False
__C.MODEL.ADD_SCORES_ALL = False
__C.MODEL.ADD_SO_SCORES = False
__C.MODEL.USE_BG = False
__C.MODEL.UNFREEZE_DET = False
# The backbone conv body to use
__C.MODEL.CONV_BODY = ''
__C.MODEL.USE_REL_PYRAMID = False
__C.MODEL.USE_NODE_CONTRASTIVE_LOSS = False
__C.MODEL.NODE_CONTRASTIVE_MARGIN = 0.2
__C.MODEL.NODE_CONTRASTIVE_WEIGHT = 1.0
__C.MODEL.USE_NODE_CONTRASTIVE_SO_AWARE_LOSS = False
__C.MODEL.NODE_CONTRASTIVE_SO_AWARE_MARGIN = 0.2
__C.MODEL.NODE_CONTRASTIVE_SO_AWARE_WEIGHT = 1.0
__C.MODEL.USE_NODE_CONTRASTIVE_P_AWARE_LOSS = False
__C.MODEL.NODE_CONTRASTIVE_P_AWARE_MARGIN = 0.2
__C.MODEL.NODE_CONTRASTIVE_P_AWARE_WEIGHT = 1.0
__C.MODEL.NODE_SAMPLE_SIZE = 128
__C.MODEL.USE_SPO_AGNOSTIC_COMPENSATION = False
# Number of classes in the dataset; must be set
# E.g., 81 for COCO (80 foreground + 1 background)
__C.MODEL.NUM_CLASSES = -1
__C.MODEL.NUM_PRD_CLASSES = -1
# Use a class agnostic bounding box regressor instead of the default per-class
# regressor
__C.MODEL.CLS_AGNOSTIC_BBOX_REG = False
# Default weights on (dx, dy, dw, dh) for normalizing bbox regression targets
# These are empirically chosen to approximately lead to unit variance targets
#
# In older versions, the weights were set such that the regression deltas
# would have unit standard deviation on the training dataset. Presently, rather
# than computing these statistics exactly, we use a fixed set of weights
# (10., 10., 5., 5.) by default. These are approximately the weights one would
# get from COCO using the previous unit stdev heuristic.
__C.MODEL.BBOX_REG_WEIGHTS = (10., 10., 5., 5.)
# The meaning of FASTER_RCNN depends on the context (training vs. inference):
# 1) During training, FASTER_RCNN = True means that end-to-end training will be
# used to jointly train the RPN subnetwork and the Fast R-CNN subnetwork
# (Faster R-CNN = RPN + Fast R-CNN).
# 2) During inference, FASTER_RCNN = True means that the model's RPN subnetwork
# will be used to generate proposals rather than relying on precomputed
# proposals. Note that FASTER_RCNN = True can be used at inference time even
# if the Faster R-CNN model was trained with stagewise training (which
# consists of alternating between RPN and Fast R-CNN training in a way that
# finally leads to a single network).
__C.MODEL.FASTER_RCNN = False
# Indicates the model makes instance mask predictions (as in Mask R-CNN)
__C.MODEL.MASK_ON = False
# Indicates the model makes keypoint predictions (as in Mask R-CNN for
# keypoints)
__C.MODEL.KEYPOINTS_ON = False
# Indicates the model's computation terminates with the production of RPN
# proposals (i.e., it outputs proposals ONLY, no actual object detections)
__C.MODEL.RPN_ONLY = False
# [Inferred value; do not set directly in a config]
# Indicate whether the res5 stage weights and training forward computation
# are shared from box head or not.
__C.MODEL.SHARE_RES5 = False
# Whether to load imagenet pretrained weights
# If True, path to the weight file must be specified.
# See: __C.RESNETS.IMAGENET_PRETRAINED_WEIGHTS
__C.MODEL.LOAD_IMAGENET_PRETRAINED_WEIGHTS = False
__C.MODEL.LOAD_COCO_PRETRAINED_WEIGHTS = False
__C.MODEL.LOAD_VRD_PRETRAINED_WEIGHTS = False
# ---------------------------------------------------------------------------- #
# Unsupervise Pose
# ---------------------------------------------------------------------------- #
__C.MODEL.UNSUPERVISED_POSE = False
# ---------------------------------------------------------------------------- #
# RetinaNet options
# ---------------------------------------------------------------------------- #
__C.RETINANET = AttrDict()
# RetinaNet is used (instead of Fast/er/Mask R-CNN/R-FCN/RPN) if True
__C.RETINANET.RETINANET_ON = False
# Anchor aspect ratios to use
__C.RETINANET.ASPECT_RATIOS = (0.5, 1.0, 2.0)
# Anchor scales per octave
__C.RETINANET.SCALES_PER_OCTAVE = 3
# At each FPN level, we generate anchors based on their scale, aspect_ratio,
# stride of the level, and we multiply the resulting anchor by ANCHOR_SCALE
__C.RETINANET.ANCHOR_SCALE = 4
# Convolutions to use in the cls and bbox tower
# NOTE: this doesn't include the last conv for logits
__C.RETINANET.NUM_CONVS = 4
# Weight for bbox_regression loss
__C.RETINANET.BBOX_REG_WEIGHT = 1.0
# Smooth L1 loss beta for bbox regression
__C.RETINANET.BBOX_REG_BETA = 0.11
# During inference, #locs to select based on cls score before NMS is performed
# per FPN level
__C.RETINANET.PRE_NMS_TOP_N = 1000
# IoU overlap ratio for labeling an anchor as positive
# Anchors with >= iou overlap are labeled positive
__C.RETINANET.POSITIVE_OVERLAP = 0.5
# IoU overlap ratio for labeling an anchor as negative
# Anchors with < iou overlap are labeled negative
__C.RETINANET.NEGATIVE_OVERLAP = 0.4
# Focal loss parameter: alpha
__C.RETINANET.LOSS_ALPHA = 0.25
# Focal loss parameter: gamma
__C.RETINANET.LOSS_GAMMA = 2.0
# Prior prob for the positives at the beginning of training. This is used to set
# the bias init for the logits layer
__C.RETINANET.PRIOR_PROB = 0.01
# Whether classification and bbox branch tower should be shared or not
__C.RETINANET.SHARE_CLS_BBOX_TOWER = False
# Use class specific bounding box regression instead of the default class
# agnostic regression
__C.RETINANET.CLASS_SPECIFIC_BBOX = False
# Whether softmax should be used in classification branch training
__C.RETINANET.SOFTMAX = False
# Inference cls score threshold, anchors with score > INFERENCE_TH are
# considered for inference
__C.RETINANET.INFERENCE_TH = 0.05
# ---------------------------------------------------------------------------- #
# Solver options
# Note: all solver options are used exactly as specified; the implication is
# that if you switch from training on 1 GPU to N GPUs, you MUST adjust the
# solver configuration accordingly. We suggest using gradual warmup and the
# linear learning rate scaling rule as described in
# "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour" Goyal et al.
# https://arxiv.org/abs/1706.02677
# ---------------------------------------------------------------------------- #
__C.SOLVER = AttrDict()
# e.g 'SGD', 'Adam'
__C.SOLVER.TYPE = 'SGD'
# Base learning rate for the specified schedule
__C.SOLVER.BASE_LR = 0.001
__C.SOLVER.BACKBONE_LR_SCALAR = 0.1
# Schedule type (see functions in utils.lr_policy for options)
# E.g., 'step', 'steps_with_decay', ...
__C.SOLVER.LR_POLICY = 'step'
# Some LR Policies (by example):
# 'step'
# lr = SOLVER.BASE_LR * SOLVER.GAMMA ** (cur_iter // SOLVER.STEP_SIZE)
# 'steps_with_decay'
# SOLVER.STEPS = [0, 60000, 80000]
# SOLVER.GAMMA = 0.1
# lr = SOLVER.BASE_LR * SOLVER.GAMMA ** current_step
# iters [0, 59999] are in current_step = 0, iters [60000, 79999] are in
# current_step = 1, and so on
# 'steps_with_lrs'
# SOLVER.STEPS = [0, 60000, 80000]
# SOLVER.LRS = [0.02, 0.002, 0.0002]
# lr = LRS[current_step]
# Hyperparameter used by the specified policy
# For 'step', the current LR is multiplied by SOLVER.GAMMA at each step
__C.SOLVER.GAMMA = 0.1
# Uniform step size for 'steps' policy
__C.SOLVER.STEP_SIZE = 30000
# Non-uniform step iterations for 'steps_with_decay' or 'steps_with_lrs'
# policies
__C.SOLVER.STEPS = []
# Learning rates to use with 'steps_with_lrs' policy
__C.SOLVER.LRS = []
# Maximum number of SGD iterations
__C.SOLVER.MAX_ITER = 40000
# Momentum to use with SGD
__C.SOLVER.MOMENTUM = 0.9
# L2 regularization hyperparameter
__C.SOLVER.WEIGHT_DECAY = 0.0005
# L2 regularization hyperparameter for GroupNorm's parameters
__C.SOLVER.WEIGHT_DECAY_GN = 0.0
# Whether to double the learning rate for bias
__C.SOLVER.BIAS_DOUBLE_LR = True
# Whether to have weight decay on bias as well
__C.SOLVER.BIAS_WEIGHT_DECAY = False
# Warm up to SOLVER.BASE_LR over this number of SGD iterations
__C.SOLVER.WARM_UP_ITERS = 500
# Start the warm up from SOLVER.BASE_LR * SOLVER.WARM_UP_FACTOR
__C.SOLVER.WARM_UP_FACTOR = 1.0 / 3.0
# WARM_UP_METHOD can be either 'constant' or 'linear' (i.e., gradual)
__C.SOLVER.WARM_UP_METHOD = 'linear'
# Scale the momentum update history by new_lr / old_lr when updating the
# learning rate (this is correct given MomentumSGDUpdateOp)
__C.SOLVER.SCALE_MOMENTUM = True
# Only apply the correction if the relative LR change exceeds this threshold
# (prevents every change in linear warm up from scaling the momentum by a tiny
# amount; momentum scaling is only important if the LR change is large)
__C.SOLVER.SCALE_MOMENTUM_THRESHOLD = 1.1
# Suppress logging of changes to LR unless the relative change exceeds this
# threshold (prevents linear warm up from spamming the training log)
__C.SOLVER.LOG_LR_CHANGE_THRESHOLD = 1.1
# ---------------------------------------------------------------------------- #
# Fast R-CNN options
# ---------------------------------------------------------------------------- #
__C.FAST_RCNN = AttrDict()
# The type of RoI head to use for bounding box classification and regression
# The string must match a function that is imported in modeling.model_builder
# (e.g., 'head_builder.add_roi_2mlp_head' to specify a two hidden layer MLP)
__C.FAST_RCNN.ROI_BOX_HEAD = ''
__C.FAST_RCNN.PRD_HEAD = ''
# Hidden layer dimension when using an MLP for the RoI box head
__C.FAST_RCNN.MLP_HEAD_DIM = 1024
# Hidden Conv layer dimension when using Convs for the RoI box head
__C.FAST_RCNN.CONV_HEAD_DIM = 256
# Number of stacked Conv layers in the RoI box head
__C.FAST_RCNN.NUM_STACKED_CONVS = 4
# RoI transformation function (e.g., RoIPool or RoIAlign)
# (RoIPoolF is the same as RoIPool; ignore the trailing 'F')
__C.FAST_RCNN.ROI_XFORM_METHOD = 'RoIPoolF'
# Number of grid sampling points in RoIAlign (usually use 2)
# Only applies to RoIAlign
__C.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO = 0
# RoI transform output resolution
# Note: some models may have constraints on what they can use, e.g. they use
# pretrained FC layers like in VGG16, and will ignore this option
__C.FAST_RCNN.ROI_XFORM_RESOLUTION = 14
# ---------------------------------------------------------------------------- #
# RPN options
# ---------------------------------------------------------------------------- #
__C.RPN = AttrDict()
# [Inferred value; do not set directly in a config]
# Indicates that the model contains an RPN subnetwork
__C.RPN.RPN_ON = False
# `True` for Detectron implementation. `False` for jwyang's implementation.
__C.RPN.OUT_DIM_AS_IN_DIM = True
# Output dim of conv2d. Ignored if `__C.RPN.OUT_DIM_AS_IN_DIM` is True.
# 512 is the fixed value in jwyang's implementation.
__C.RPN.OUT_DIM = 512
# 'sigmoid' or 'softmax'. Detectron uses 'sigmoid'; jwyang uses 'softmax'.
# This will affect the conv2d output dim for classifying the bg/fg rois
__C.RPN.CLS_ACTIVATION = 'sigmoid'
# RPN anchor sizes given in absolute pixels w.r.t. the scaled network input
# Note: these options are *not* used by FPN RPN; see FPN.RPN* options
__C.RPN.SIZES = (64, 128, 256, 512)
# Stride of the feature map that RPN is attached to
__C.RPN.STRIDE = 16
# RPN anchor aspect ratios
__C.RPN.ASPECT_RATIOS = (0.5, 1, 2)
# ---------------------------------------------------------------------------- #
# FPN options
# ---------------------------------------------------------------------------- #
__C.FPN = AttrDict()
# FPN is enabled if True
__C.FPN.FPN_ON = False
# Channel dimension of the FPN feature levels
__C.FPN.DIM = 256
# Initialize the lateral connections to output zero if True
__C.FPN.ZERO_INIT_LATERAL = False
# Stride of the coarsest FPN level
# This is needed so the input can be padded properly
__C.FPN.COARSEST_STRIDE = 32
#
# FPN may be used for just RPN, just object detection, or both
#
# Use FPN for RoI transform for object detection if True
__C.FPN.MULTILEVEL_ROIS = False
# Hyperparameters for the RoI-to-FPN level mapping heuristic
__C.FPN.ROI_CANONICAL_SCALE = 224 # s0
__C.FPN.ROI_CANONICAL_LEVEL = 4 # k0: where s0 maps to
# Coarsest level of the FPN pyramid
__C.FPN.ROI_MAX_LEVEL = 5
# Finest level of the FPN pyramid
__C.FPN.ROI_MIN_LEVEL = 2
# Use FPN for RPN if True
__C.FPN.MULTILEVEL_RPN = False
# Coarsest level of the FPN pyramid
__C.FPN.RPN_MAX_LEVEL = 6
# Finest level of the FPN pyramid
__C.FPN.RPN_MIN_LEVEL = 2
# FPN RPN anchor aspect ratios
__C.FPN.RPN_ASPECT_RATIOS = (0.5, 1, 2)
# RPN anchors start at this size on RPN_MIN_LEVEL
# The anchor size doubles at each level after that
# With a default of 32 and levels 2 to 6, we get anchor sizes of 32 to 512
__C.FPN.RPN_ANCHOR_START_SIZE = 32
# [Inferred value] Scale for RPN_POST_NMS_TOP_N.
# Automatically inferred in training, fixed to 1 in testing.
__C.FPN.RPN_COLLECT_SCALE = 1
# Use extra FPN levels, as done in the RetinaNet paper
__C.FPN.EXTRA_CONV_LEVELS = False
# Use GroupNorm in the FPN-specific layers (lateral, etc.)
__C.FPN.USE_GN = False
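# Illustrative sketch (not part of the original config): the RoI-to-FPN level
# mapping heuristic from the FPN paper that ROI_CANONICAL_SCALE (s0) and
# ROI_CANONICAL_LEVEL (k0) parameterize; the actual implementation lives in
# utils.fpn.map_rois_to_fpn_levels. Shown here only to make the two options
# above concrete.
def _example_map_roi_to_fpn_level(x1, y1, x2, y2):
    """Return the FPN level a single RoI (in input image coords) maps to."""
    s = np.sqrt((x2 - x1) * (y2 - y1))  # RoI scale = sqrt(area)
    k = np.floor(__C.FPN.ROI_CANONICAL_LEVEL +
                 np.log2(s / __C.FPN.ROI_CANONICAL_SCALE + 1e-6))
    # Clamp to the levels actually built by the pyramid
    return int(np.clip(k, __C.FPN.ROI_MIN_LEVEL, __C.FPN.ROI_MAX_LEVEL))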
# ---------------------------------------------------------------------------- #
# Mask R-CNN options ("MRCNN" means Mask R-CNN)
# ---------------------------------------------------------------------------- #
__C.MRCNN = AttrDict()
# The type of RoI head to use for instance mask prediction
# The string must match a function that is imported in modeling.model_builder
# (e.g., 'mask_rcnn_heads.ResNet_mask_rcnn_fcn_head_v1up4convs')
__C.MRCNN.ROI_MASK_HEAD = ''
# Resolution of mask predictions
__C.MRCNN.RESOLUTION = 14
# RoI transformation function and associated options
__C.MRCNN.ROI_XFORM_METHOD = 'RoIAlign'
# RoI transformation function (e.g., RoIPool or RoIAlign)
__C.MRCNN.ROI_XFORM_RESOLUTION = 7
# Number of grid sampling points in RoIAlign (usually use 2)
# Only applies to RoIAlign
__C.MRCNN.ROI_XFORM_SAMPLING_RATIO = 0
# Number of channels in the mask head
__C.MRCNN.DIM_REDUCED = 256
# Use dilated convolution in the mask head
__C.MRCNN.DILATION = 2
# Upsample the predicted masks by this factor
__C.MRCNN.UPSAMPLE_RATIO = 1
# Use a fully-connected layer to predict the final masks instead of a conv layer
__C.MRCNN.USE_FC_OUTPUT = False
# Weight initialization method for the mask head and mask output layers. ['GaussianFill', 'MSRAFill']
__C.MRCNN.CONV_INIT = 'GaussianFill'
# Use class specific mask predictions if True (otherwise use class agnostic mask
# predictions)
__C.MRCNN.CLS_SPECIFIC_MASK = True
# Multi-task loss weight for masks
__C.MRCNN.WEIGHT_LOSS_MASK = 1.0
# Binarization threshold for converting soft masks to hard masks
__C.MRCNN.THRESH_BINARIZE = 0.5
__C.MRCNN.MEMORY_EFFICIENT_LOSS = True # TODO
# ---------------------------------------------------------------------------- #
# Keypoint Mask R-CNN options ("KRCNN" = Mask R-CNN with Keypoint support)
# ---------------------------------------------------------------------------- #
__C.KRCNN = AttrDict()
# The type of RoI head to use for instance keypoint prediction
# The string must match a function that is imported in modeling.model_builder
# (e.g., 'keypoint_rcnn_heads.add_roi_pose_head_v1convX')
__C.KRCNN.ROI_KEYPOINTS_HEAD = ''
# Output size (on which the loss is computed), e.g., 56x56
__C.KRCNN.HEATMAP_SIZE = -1
# Use bilinear interpolation to upsample the final heatmap by this factor
__C.KRCNN.UP_SCALE = -1
# Apply a ConvTranspose layer to the hidden representation computed by the
# keypoint head prior to predicting the per-keypoint heatmaps
__C.KRCNN.USE_DECONV = False
# Channel dimension of the hidden representation produced by the ConvTranspose
__C.KRCNN.DECONV_DIM = 256
# Use a ConvTranspose layer to predict the per-keypoint heatmaps
__C.KRCNN.USE_DECONV_OUTPUT = False
# Use dilation in the keypoint head
__C.KRCNN.DILATION = 1
# Size of the kernels to use in all ConvTranspose operations
__C.KRCNN.DECONV_KERNEL = 4
# Number of keypoints in the dataset (e.g., 17 for COCO)
__C.KRCNN.NUM_KEYPOINTS = -1
# Number of stacked Conv layers in keypoint head
__C.KRCNN.NUM_STACKED_CONVS = 8
# Dimension of the hidden representation output by the keypoint head
__C.KRCNN.CONV_HEAD_DIM = 256
# Conv kernel size used in the keypoint head
__C.KRCNN.CONV_HEAD_KERNEL = 3
# Conv kernel weight filling function
__C.KRCNN.CONV_INIT = 'GaussianFill'
# Use NMS based on OKS if True
__C.KRCNN.NMS_OKS = False
# Source of keypoint confidence
# Valid options: ('bbox', 'logit', 'prob')
__C.KRCNN.KEYPOINT_CONFIDENCE = 'bbox'
# Standard ROI XFORM options (see FAST_RCNN or MRCNN options)
__C.KRCNN.ROI_XFORM_METHOD = 'RoIAlign'
__C.KRCNN.ROI_XFORM_RESOLUTION = 7
__C.KRCNN.ROI_XFORM_SAMPLING_RATIO = 0
# Minimum number of labeled keypoints that must exist in a minibatch (otherwise
# the minibatch is discarded)
__C.KRCNN.MIN_KEYPOINT_COUNT_FOR_VALID_MINIBATCH = 20
# When inferring the keypoint locations from the heatmap, don't scale the heatmap
# below this minimum size
__C.KRCNN.INFERENCE_MIN_SIZE = 0
# Multi-task loss weight to use for keypoints
# Recommended values:
# - use 1.0 if KRCNN.NORMALIZE_BY_VISIBLE_KEYPOINTS is True
# - use 4.0 if KRCNN.NORMALIZE_BY_VISIBLE_KEYPOINTS is False
__C.KRCNN.LOSS_WEIGHT = 1.0
# Normalize by the total number of visible keypoints in the minibatch if True.
# Otherwise, normalize by the total number of keypoints that could ever exist
# in the minibatch. See comments in modeling.model_builder.add_keypoint_losses
# for detailed discussion.
__C.KRCNN.NORMALIZE_BY_VISIBLE_KEYPOINTS = True
# ---------------------------------------------------------------------------- #
# R-FCN options
# ---------------------------------------------------------------------------- #
__C.RFCN = AttrDict()
# Position-sensitive RoI pooling output grid size (height and width)
__C.RFCN.PS_GRID_SIZE = 3
# ---------------------------------------------------------------------------- #
# ResNets options ("ResNets" = ResNet and ResNeXt)
# ---------------------------------------------------------------------------- #
__C.RESNETS = AttrDict()
# Number of groups to use; 1 ==> ResNet; > 1 ==> ResNeXt
__C.RESNETS.NUM_GROUPS = 1
# Baseline width of each group
__C.RESNETS.WIDTH_PER_GROUP = 64
# Place the stride 2 conv on the 1x1 filter
# Use True only for the original MSRA ResNet; use False for C2 and Torch models
__C.RESNETS.STRIDE_1X1 = True
# Residual transformation function
__C.RESNETS.TRANS_FUNC = 'bottleneck_transformation'
# ResNet's stem function (conv1 and pool1)
__C.RESNETS.STEM_FUNC = 'basic_bn_stem'
# ResNet's shortcut function
__C.RESNETS.SHORTCUT_FUNC = 'basic_bn_shortcut'
# Apply dilation in stage "res5"
__C.RESNETS.RES5_DILATION = 1
# Freeze model weights up to and including this block.
# Choices: [0, 2, 3, 4, 5]. 0 means nothing is frozen. The first conv and bn
# are frozen by default.
__C.RESNETS.FREEZE_AT = 2
# Path to pretrained resnet weights on ImageNet.
# If it starts with '/', it is treated as an absolute path.
# Otherwise, it is treated as a path relative to __C.ROOT_DIR
__C.RESNETS.IMAGENET_PRETRAINED_WEIGHTS = ''
__C.RESNETS.COCO_PRETRAINED_WEIGHTS = ''
__C.RESNETS.OI_PRETRAINED_WEIGHTS = ''
__C.RESNETS.OI_REL_PRETRAINED_WEIGHTS = ''
__C.RESNETS.OI_REL_PRD_PRETRAINED_WEIGHTS = ''
__C.RESNETS.VRD_PRETRAINED_WEIGHTS = ''
__C.RESNETS.VRD_PRD_PRETRAINED_WEIGHTS = ''
__C.RESNETS.VG_PRETRAINED_WEIGHTS = ''
__C.RESNETS.VG_PRD_PRETRAINED_WEIGHTS = ''
__C.RESNETS.TO_BE_FINETUNED_WEIGHTS = ''
__C.RESNETS.REL_PRETRAINED_WEIGHTS = ''
# Use GroupNorm instead of BatchNorm
__C.RESNETS.USE_GN = False
__C.VGG16 = AttrDict()
__C.VGG16.IMAGENET_PRETRAINED_WEIGHTS = ''
__C.VGG16.COCO_PRETRAINED_WEIGHTS = ''
__C.VGG16.OI_PRETRAINED_WEIGHTS = ''
__C.VGG16.OI_REL_PRETRAINED_WEIGHTS = ''
__C.VGG16.OI_REL_PRD_PRETRAINED_WEIGHTS = ''
__C.VGG16.VRD_PRETRAINED_WEIGHTS = ''
__C.VGG16.VRD_PRD_PRETRAINED_WEIGHTS = ''
__C.VGG16.VG_PRETRAINED_WEIGHTS = ''
__C.VGG16.VG_PRD_PRETRAINED_WEIGHTS = ''
__C.VGG16.TO_BE_FINETUNED_WEIGHTS = ''
# ---------------------------------------------------------------------------- #
# GroupNorm options
# ---------------------------------------------------------------------------- #
__C.GROUP_NORM = AttrDict()
# Number of dimensions per group in GroupNorm (-1 if using NUM_GROUPS)
__C.GROUP_NORM.DIM_PER_GP = -1
# Number of groups in GroupNorm (-1 if using DIM_PER_GP)
__C.GROUP_NORM.NUM_GROUPS = 32
# GroupNorm's small constant in the denominator
__C.GROUP_NORM.EPSILON = 1e-5
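# Illustrative sketch (not part of the original config): how these two options
# are typically resolved into a group count for nn.GroupNorm (see
# utils.net.get_group_gn in this codebase for the actual logic).
def _example_group_count(dim):
    """Return the GroupNorm group count to use for a channel dimension `dim`."""
    if __C.GROUP_NORM.DIM_PER_GP > 0:
        # Fixed group size: the number of groups follows from the dimension
        return dim // __C.GROUP_NORM.DIM_PER_GP
    # Otherwise the number of groups is fixed directly
    return __C.GROUP_NORM.NUM_GROUPS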
# ---------------------------------------------------------------------------- #
# MISC options
# ---------------------------------------------------------------------------- #
# Number of GPUs to use (applies to both training and testing)
__C.NUM_GPUS = 1
# The mapping from image coordinates to feature map coordinates might cause
# some boxes that are distinct in image space to become identical in feature
# coordinates. If DEDUP_BOXES > 0, then DEDUP_BOXES is used as the scale factor
# for identifying duplicate boxes.
# 1/16 is correct for {Alex,Caffe}Net, VGG_CNN_M_1024, and VGG16
__C.DEDUP_BOXES = 1. / 16.
# Clip bounding box transformation predictions to prevent np.exp from
# overflowing
# Heuristic choice based on the value that would scale a 16 pixel anchor up to
# 1000 pixels
__C.BBOX_XFORM_CLIP = np.log(1000. / 16.)
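# (In Detectron-style code this value is used by utils.boxes.bbox_transform,
# which clamps the predicted dw/dh deltas to BBOX_XFORM_CLIP before np.exp.)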
# Pixel mean values (BGR order) as a (1, 1, 3) array
# We use the same pixel mean for all networks even though it's not exactly what
# they were trained with
# "Fun" fact: the history of where these values comes from is lost (From Detectron lol)
__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
# For reproducibility
__C.RNG_SEED = 3
# A small number that's used many times
__C.EPS = 1e-14
# Root directory of project
__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))
# Output basedir
__C.OUTPUT_DIR = 'Outputs'
# Name (or path to) the matlab executable
__C.MATLAB = 'matlab'
# Dump detection visualizations
__C.VIS = False
# Score threshold for visualization
__C.VIS_TH = 0.9
# Expected results should take the form of a list of expectations, each
# specified by four elements (dataset, task, metric, expected value). For
# example: [['coco_2014_minival', 'box_proposal', 'AR@1000', 0.387]]
__C.EXPECTED_RESULTS = []
# Absolute and relative tolerance to use when comparing to EXPECTED_RESULTS
__C.EXPECTED_RESULTS_RTOL = 0.1
__C.EXPECTED_RESULTS_ATOL = 0.005
# Set to send email in case of an EXPECTED_RESULTS failure
__C.EXPECTED_RESULTS_EMAIL = ''
# ------------------------------
# Data directory
__C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'data'))
# [Deprecated]
__C.POOLING_MODE = 'crop'
# [Deprecated] Size of the pooled region after RoI pooling
__C.POOLING_SIZE = 7
__C.CROP_RESIZE_WITH_MAX_POOL = True
# [Inferred value]
__C.CUDA = False
__C.DEBUG = False
# [Inferred value]
__C.PYTORCH_VERSION_LESS_THAN_040 = False
# ---------------------------------------------------------------------------- #
# mask heads or keypoint heads that share res5 stage weights and
# training forward computation with box head.
# ---------------------------------------------------------------------------- #
_SHARE_RES5_HEADS = set(
[
'mask_rcnn_heads.mask_rcnn_fcn_head_v0upshare',
]
)
def assert_and_infer_cfg(make_immutable=True):
"""Call this function in your script after you have finished setting all cfg
values that are necessary (e.g., merging a config from a file, merging
command line config options, etc.). By default, this function will also
mark the global cfg as immutable to prevent changing the global cfg settings
during script execution (which can lead to hard to debug errors or code
that's harder to understand than is necessary).
"""
if __C.MODEL.RPN_ONLY or __C.MODEL.FASTER_RCNN:
__C.RPN.RPN_ON = True
if __C.RPN.RPN_ON or __C.RETINANET.RETINANET_ON:
__C.TEST.PRECOMPUTED_PROPOSALS = False
if __C.MODEL.LOAD_IMAGENET_PRETRAINED_WEIGHTS:
assert __C.RESNETS.IMAGENET_PRETRAINED_WEIGHTS or __C.VGG16.IMAGENET_PRETRAINED_WEIGHTS, \
"Path to the weight file must not be empty to load imagenet pertrained resnets."
if set([__C.MRCNN.ROI_MASK_HEAD, __C.KRCNN.ROI_KEYPOINTS_HEAD]) & _SHARE_RES5_HEADS:
__C.MODEL.SHARE_RES5 = True
if version.parse(torch.__version__) < version.parse('0.4.0'):
__C.PYTORCH_VERSION_LESS_THAN_040 = True
# create alias for PyTorch version less than 0.4.0
init.uniform_ = init.uniform
init.normal_ = init.normal
init.constant_ = init.constant
nn.GroupNorm = mynn.GroupNorm
if make_immutable:
cfg.immutable(True)
def merge_cfg_from_file(cfg_filename):
"""Load a yaml config file and merge it into the global config."""
with open(cfg_filename, 'r') as f:
yaml_cfg = AttrDict(yaml.load(f))
_merge_a_into_b(yaml_cfg, __C)
cfg_from_file = merge_cfg_from_file
def merge_cfg_from_cfg(cfg_other):
"""Merge `cfg_other` into the global config."""
_merge_a_into_b(cfg_other, __C)
def merge_cfg_from_list(cfg_list):
"""Merge config keys, values in a list (e.g., from command line) into the
global config. For example, `cfg_list = ['TEST.NMS', 0.5]`.
"""
assert len(cfg_list) % 2 == 0
for full_key, v in zip(cfg_list[0::2], cfg_list[1::2]):
# if _key_is_deprecated(full_key):
# continue
# if _key_is_renamed(full_key):
# _raise_key_rename_error(full_key)
key_list = full_key.split('.')
d = __C
for subkey in key_list[:-1]:
assert subkey in d, 'Non-existent key: {}'.format(full_key)
d = d[subkey]
subkey = key_list[-1]
assert subkey in d, 'Non-existent key: {}'.format(full_key)
value = _decode_cfg_value(v)
value = _check_and_coerce_cfg_value_type(
value, d[subkey], subkey, full_key
)
d[subkey] = value
cfg_from_list = merge_cfg_from_list
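# Illustrative sketch (not part of the original file): the typical order in
# which a training/test script uses the helpers above. The yaml path and the
# override list are placeholders.
def _example_setup_cfg():
    merge_cfg_from_file('configs/example_experiment.yaml')       # placeholder path
    merge_cfg_from_list(['TEST.NMS', '0.5', 'SOLVER.BASE_LR', '0.01'])
    assert_and_infer_cfg()  # freeze the config once everything is merged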
def _merge_a_into_b(a, b, stack=None):
"""Merge config dictionary a into config dictionary b, clobbering the
options in b whenever they are also specified in a.
"""
assert isinstance(a, AttrDict), 'Argument `a` must be an AttrDict'
assert isinstance(b, AttrDict), 'Argument `b` must be an AttrDict'
for k, v_ in a.items():
full_key = '.'.join(stack) + '.' + k if stack is not None else k
# a must specify keys that are in b
if k not in b:
# if _key_is_deprecated(full_key):
# continue
# elif _key_is_renamed(full_key):
# _raise_key_rename_error(full_key)
# else:
raise KeyError('Non-existent config key: {}'.format(full_key))
v = copy.deepcopy(v_)
v = _decode_cfg_value(v)
v = _check_and_coerce_cfg_value_type(v, b[k], k, full_key)
# Recursively merge dicts
if isinstance(v, AttrDict):
try:
stack_push = [k] if stack is None else stack + [k]
_merge_a_into_b(v, b[k], stack=stack_push)
except BaseException:
raise
else:
b[k] = v
def _decode_cfg_value(v):
"""Decodes a raw config value (e.g., from a yaml config files or command
line argument) into a Python object.
"""
# Configs parsed from raw yaml will contain dictionary keys that need to be
# converted to AttrDict objects
if isinstance(v, dict):
return AttrDict(v)
# All remaining processing is only applied to strings
if not isinstance(v, six.string_types):
return v
# Try to interpret `v` as a:
# string, number, tuple, list, dict, boolean, or None
try:
v = literal_eval(v)
# The following two excepts allow v to pass through when it represents a
# string.
#
# Longer explanation:
# The type of v is always a string (before calling literal_eval), but
# sometimes it *represents* a string and other times a data structure, like
# a list. In the case that v represents a string, what we got back from the
# yaml parser is 'foo' *without quotes* (so, not '"foo"'). literal_eval is
# ok with '"foo"', but will raise a ValueError if given 'foo'. In other
# cases, like paths (v = 'foo/bar' and not v = '"foo/bar"'), literal_eval
# will raise a SyntaxError.
except ValueError:
pass
except SyntaxError:
pass
return v
def _check_and_coerce_cfg_value_type(value_a, value_b, key, full_key):
"""Checks that `value_a`, which is intended to replace `value_b` is of the
right type. The type is correct if it matches exactly or is one of a few
cases in which the type can be easily coerced.
"""
# The types must match (with some exceptions)
type_b = type(value_b)
type_a = type(value_a)
if type_a is type_b:
return value_a
# Exceptions: numpy arrays, strings, tuple<->list
if isinstance(value_b, np.ndarray):
value_a = np.array(value_a, dtype=value_b.dtype)
elif isinstance(value_b, six.string_types):
value_a = str(value_a)
elif isinstance(value_a, tuple) and isinstance(value_b, list):
value_a = list(value_a)
elif isinstance(value_a, list) and isinstance(value_b, tuple):
value_a = tuple(value_a)
else:
raise ValueError(
'Type mismatch ({} vs. {}) with values ({} vs. {}) for config '
'key: {}'.format(type_b, type_a, value_b, value_a, full_key)
)
return value_a
| ContrastiveLosses4VRD-master | lib/core/config.py |
ContrastiveLosses4VRD-master | lib/core/__init__.py |
|
# Adapted by Ji Zhang in 2019
# From Detectron.pytorch/lib/core/test.py
# Original license text below
# --------------------------------------------------------
# Written by Roy Tseng
#
# Based on:
# --------------------------------------------------------
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
#
# Based on:
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict
from six.moves import cPickle as pickle
import cv2
import logging
import numpy as np
from torch.autograd import Variable
import torch
from core.config import cfg
from utils.timer import Timer
import utils.blob as blob_utils
import utils.fpn as fpn_utils
import utils.image as image_utils
logger = logging.getLogger(__name__)
def im_detect_rels(model, im, dataset_name, box_proposals, do_vis=False, timers=None, roidb=None, use_gt_labels=False):
if timers is None:
timers = defaultdict(Timer)
timers['im_detect_rels'].tic()
rel_results = im_get_det_rels(model, im, dataset_name, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, box_proposals, do_vis, roidb, use_gt_labels)
timers['im_detect_rels'].toc()
return rel_results
def im_get_det_rels(model, im, dataset_name, target_scale, target_max_size, boxes=None, do_vis=False, roidb=None, use_gt_labels=False):
"""Prepare the bbox for testing"""
inputs, im_scale = _get_blobs(im, boxes, target_scale, target_max_size)
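    # With precomputed proposals (i.e. not Faster R-CNN), boxes that land on the
    # same feature-map location after scaling are de-duplicated: each RoI is
    # hashed on its scaled coordinates and only unique rows are kept
    # (cfg.DEDUP_BOXES is the scale used for hashing).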
if cfg.DEDUP_BOXES > 0 and not cfg.MODEL.FASTER_RCNN:
v = np.array([1, 1e3, 1e6, 1e9, 1e12])
hashes = np.round(inputs['rois'] * cfg.DEDUP_BOXES).dot(v)
_, index, inv_index = np.unique(
hashes, return_index=True, return_inverse=True
)
inputs['rois'] = inputs['rois'][index, :]
boxes = boxes[index, :]
# Add multi-level rois for FPN
if cfg.FPN.MULTILEVEL_ROIS and not cfg.MODEL.FASTER_RCNN:
_add_multilevel_rois_for_test(inputs, 'rois')
if cfg.PYTORCH_VERSION_LESS_THAN_040:
inputs['data'] = [Variable(torch.from_numpy(inputs['data']), volatile=True)]
inputs['im_info'] = [Variable(torch.from_numpy(inputs['im_info']), volatile=True)]
else:
inputs['data'] = [torch.from_numpy(inputs['data'])]
inputs['im_info'] = [torch.from_numpy(inputs['im_info'])]
if dataset_name is not None:
inputs['dataset_name'] = [blob_utils.serialize(dataset_name)]
inputs['do_vis'] = [do_vis]
if roidb is not None:
inputs['roidb'] = [roidb]
if use_gt_labels:
inputs['use_gt_labels'] = [use_gt_labels]
return_dict = model(**inputs)
return_dict2 = {}
if return_dict['sbj_rois'] is not None:
sbj_boxes = return_dict['sbj_rois'].data.cpu().numpy()[:, 1:5] / im_scale
sbj_labels = return_dict['sbj_labels'].data.cpu().numpy() - 1
sbj_scores = return_dict['sbj_scores'].data.cpu().numpy()
obj_boxes = return_dict['obj_rois'].data.cpu().numpy()[:, 1:5] / im_scale
obj_labels = return_dict['obj_labels'].data.cpu().numpy() - 1
obj_scores = return_dict['obj_scores'].data.cpu().numpy()
prd_scores = return_dict['prd_scores'].data.cpu().numpy()
if cfg.MODEL.USE_FREQ_BIAS:
prd_scores_bias = return_dict['prd_scores_bias'].data.cpu().numpy()
if cfg.MODEL.USE_SPATIAL_FEAT:
prd_scores_spt = return_dict['prd_scores_spt'].data.cpu().numpy()
if cfg.MODEL.ADD_SCORES_ALL:
prd_scores_ttl = return_dict['prd_ttl_scores'].data.cpu().numpy()
return_dict2 = dict(sbj_boxes=sbj_boxes,
sbj_labels=sbj_labels.astype(np.int32, copy=False),
sbj_scores=sbj_scores,
obj_boxes=obj_boxes,
obj_labels=obj_labels.astype(np.int32, copy=False),
obj_scores=obj_scores,
prd_scores=prd_scores)
if cfg.MODEL.ADD_SCORES_ALL:
return_dict2['prd_scores_ttl'] = prd_scores_ttl
if cfg.MODEL.USE_FREQ_BIAS:
return_dict2['prd_scores_bias'] = prd_scores_bias
if cfg.MODEL.USE_SPATIAL_FEAT:
return_dict2['prd_scores_spt'] = prd_scores_spt
if do_vis:
if isinstance(return_dict['blob_conv'], list):
blob_conv = [b.data.cpu().numpy().squeeze() for b in return_dict['blob_conv']]
blob_conv_prd = [b.data.cpu().numpy().squeeze() for b in return_dict['blob_conv_prd']]
blob_conv = [b.mean(axis=0) for b in blob_conv]
blob_conv_prd = [b.mean(axis=0) for b in blob_conv_prd]
return_dict2['blob_conv'] = blob_conv
return_dict2['blob_conv_prd'] = blob_conv_prd
else:
blob_conv = return_dict['blob_conv'].data.cpu().numpy().squeeze()
blob_conv_prd = return_dict['blob_conv_prd'].data.cpu().numpy().squeeze()
blob_conv = blob_conv.mean(axis=0)
blob_conv_prd = blob_conv_prd.mean(axis=0)
return_dict2['blob_conv'] = blob_conv
return_dict2['blob_conv_prd'] = blob_conv_prd
else:
return_dict2 = dict(sbj_boxes=None,
sbj_labels=None,
sbj_scores=None,
obj_boxes=None,
obj_labels=None,
obj_scores=None,
prd_scores=None)
return return_dict2
def _get_rois_blob(im_rois, im_scale):
"""Converts RoIs into network inputs.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
        im_scale (float): image scale factor as returned by blob_utils.get_image_blob
Returns:
blob (ndarray): R x 5 matrix of RoIs in the image pyramid with columns
[level, x1, y1, x2, y2]
"""
rois, levels = _project_im_rois(im_rois, im_scale)
rois_blob = np.hstack((levels, rois))
return rois_blob.astype(np.float32, copy=False)
def _project_im_rois(im_rois, scales):
"""Project image RoIs into the image pyramid built by _get_image_blob.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
scales (list): scale factors as returned by _get_image_blob
Returns:
rois (ndarray): R x 4 matrix of projected RoI coordinates
levels (ndarray): image pyramid levels used by each projected RoI
"""
rois = im_rois.astype(np.float, copy=False) * scales
levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)
return rois, levels
def _add_multilevel_rois_for_test(blobs, name):
"""Distributes a set of RoIs across FPN pyramid levels by creating new level
specific RoI blobs.
Arguments:
blobs (dict): dictionary of blobs
name (str): a key in 'blobs' identifying the source RoI blob
Returns:
[by ref] blobs (dict): new keys named by `name + 'fpn' + level`
are added to dict each with a value that's an R_level x 5 ndarray of
RoIs (see _get_rois_blob for format)
"""
lvl_min = cfg.FPN.ROI_MIN_LEVEL
lvl_max = cfg.FPN.ROI_MAX_LEVEL
lvls = fpn_utils.map_rois_to_fpn_levels(blobs[name][:, 1:5], lvl_min, lvl_max)
fpn_utils.add_multilevel_roi_blobs(
blobs, name, blobs[name], lvls, lvl_min, lvl_max
)
def _get_blobs(im, rois, target_scale, target_max_size):
"""Convert an image and RoIs within that image into network inputs."""
blobs = {}
blobs['data'], im_scale, blobs['im_info'] = \
blob_utils.get_image_blob(im, target_scale, target_max_size)
if rois is not None:
blobs['rois'] = _get_rois_blob(rois, im_scale)
return blobs, im_scale
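# Illustrative usage sketch (not part of the original file): how a test script
# would typically call im_detect_rels for one image. `model` (a relationship
# detection network built elsewhere) and `im` (a BGR image from cv2.imread)
# are placeholders supplied by the caller; 'vg' is an example dataset name.
def _example_detect_rels_for_image(model, im):
    rel_results = im_detect_rels(model, im, 'vg', box_proposals=None)
    # Each entry is None when no detections were produced for the image
    return (rel_results['sbj_boxes'],   # (R, 4) subject boxes in image coords
            rel_results['obj_boxes'],   # (R, 4) object boxes in image coords
            rel_results['prd_scores'])  # (R, num_prd_classes) predicate scores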
| ContrastiveLosses4VRD-master | lib/core/test_rel.py |
# Adapted from Detectron.pytorch/lib/modeling/fast_rcnn_heads.py
# for this project by Ji Zhang, 2019
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch.autograd import Variable
from core.config import cfg
import nn as mynn
import utils.net as net_utils
class fast_rcnn_outputs(nn.Module):
def __init__(self, dim_in):
super().__init__()
self.cls_score = nn.Linear(dim_in, cfg.MODEL.NUM_CLASSES)
if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG: # bg and fg
self.bbox_pred = nn.Linear(dim_in, 4 * 2)
else:
self.bbox_pred = nn.Linear(dim_in, 4 * cfg.MODEL.NUM_CLASSES)
self._init_weights()
def _init_weights(self):
init.normal_(self.cls_score.weight, std=0.01)
init.constant_(self.cls_score.bias, 0)
init.normal_(self.bbox_pred.weight, std=0.001)
init.constant_(self.bbox_pred.bias, 0)
def detectron_weight_mapping(self):
detectron_weight_mapping = {
'cls_score.weight': 'cls_score_w',
'cls_score.bias': 'cls_score_b',
'bbox_pred.weight': 'bbox_pred_w',
'bbox_pred.bias': 'bbox_pred_b'
}
orphan_in_detectron = []
return detectron_weight_mapping, orphan_in_detectron
def forward(self, x):
if x.dim() == 4:
x = x.squeeze(3).squeeze(2)
cls_score = self.cls_score(x)
if not self.training:
cls_score = F.softmax(cls_score, dim=1)
bbox_pred = self.bbox_pred(x)
return cls_score, bbox_pred
def fast_rcnn_losses(cls_score, bbox_pred, label_int32, bbox_targets,
bbox_inside_weights, bbox_outside_weights):
device_id = cls_score.get_device()
rois_label = Variable(torch.from_numpy(label_int32.astype('int64'))).cuda(device_id)
loss_cls = F.cross_entropy(cls_score, rois_label)
bbox_targets = Variable(torch.from_numpy(bbox_targets)).cuda(device_id)
bbox_inside_weights = Variable(torch.from_numpy(bbox_inside_weights)).cuda(device_id)
bbox_outside_weights = Variable(torch.from_numpy(bbox_outside_weights)).cuda(device_id)
loss_bbox = net_utils.smooth_l1_loss(
bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights)
# class accuracy
cls_preds = cls_score.max(dim=1)[1].type_as(rois_label)
accuracy_cls = cls_preds.eq(rois_label).float().mean(dim=0)
return loss_cls, loss_bbox, accuracy_cls
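# Illustrative usage sketch (not part of the original file): how the outputs
# module and loss above are wired together during training, assuming the
# standard Detectron-style blob names produced by the RoI data loader
# ('labels_int32', 'bbox_targets', 'bbox_inside_weights', 'bbox_outside_weights').
def _example_box_head_training_step(box_outs, box_feat, rpn_ret):
    """box_outs: a fast_rcnn_outputs module; box_feat: pooled RoI features."""
    cls_score, bbox_pred = box_outs(box_feat)
    loss_cls, loss_bbox, acc = fast_rcnn_losses(
        cls_score, bbox_pred,
        rpn_ret['labels_int32'], rpn_ret['bbox_targets'],
        rpn_ret['bbox_inside_weights'], rpn_ret['bbox_outside_weights'])
    return loss_cls + loss_bbox, acc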
# ---------------------------------------------------------------------------- #
# Box heads
# ---------------------------------------------------------------------------- #
class roi_2mlp_head(nn.Module):
"""Add a ReLU MLP with two hidden layers."""
def __init__(self, dim_in, roi_xform_func, spatial_scale):
super().__init__()
self.dim_in = dim_in
self.roi_xform = roi_xform_func
self.spatial_scale = spatial_scale
self.dim_out = hidden_dim = cfg.FAST_RCNN.MLP_HEAD_DIM
roi_size = cfg.FAST_RCNN.ROI_XFORM_RESOLUTION
self.fc1 = nn.Linear(dim_in * roi_size**2, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, hidden_dim)
self._init_weights()
def _init_weights(self):
mynn.init.XavierFill(self.fc1.weight)
init.constant_(self.fc1.bias, 0)
mynn.init.XavierFill(self.fc2.weight)
init.constant_(self.fc2.bias, 0)
def detectron_weight_mapping(self):
detectron_weight_mapping = {
'fc1.weight': 'fc6_w',
'fc1.bias': 'fc6_b',
'fc2.weight': 'fc7_w',
'fc2.bias': 'fc7_b'
}
return detectron_weight_mapping, []
def forward(self, x, rpn_ret, rois_name='rois', use_relu=True):
x = self.roi_xform(
x, rpn_ret,
blob_rois=rois_name,
method=cfg.FAST_RCNN.ROI_XFORM_METHOD,
resolution=cfg.FAST_RCNN.ROI_XFORM_RESOLUTION,
spatial_scale=self.spatial_scale,
sampling_ratio=cfg.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO
)
batch_size = x.size(0)
x = F.relu(self.fc1(x.view(batch_size, -1)), inplace=True)
if use_relu:
x = F.relu(self.fc2(x), inplace=True)
else:
x = self.fc2(x)
return x
class roi_Xconv1fc_head(nn.Module):
"""Add a X conv + 1fc head, as a reference if not using GroupNorm"""
def __init__(self, dim_in, roi_xform_func, spatial_scale):
super().__init__()
self.dim_in = dim_in
self.roi_xform = roi_xform_func
self.spatial_scale = spatial_scale
hidden_dim = cfg.FAST_RCNN.CONV_HEAD_DIM
module_list = []
for i in range(cfg.FAST_RCNN.NUM_STACKED_CONVS):
module_list.extend([
nn.Conv2d(dim_in, hidden_dim, 3, 1, 1),
nn.ReLU(inplace=True)
])
dim_in = hidden_dim
self.convs = nn.Sequential(*module_list)
self.dim_out = fc_dim = cfg.FAST_RCNN.MLP_HEAD_DIM
roi_size = cfg.FAST_RCNN.ROI_XFORM_RESOLUTION
self.fc = nn.Linear(dim_in * roi_size * roi_size, fc_dim)
self._init_weights()
def _init_weights(self):
def _init(m):
if isinstance(m, nn.Conv2d):
mynn.init.MSRAFill(m.weight)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
mynn.init.XavierFill(m.weight)
init.constant_(m.bias, 0)
self.apply(_init)
def detectron_weight_mapping(self):
mapping = {}
for i in range(cfg.FAST_RCNN.NUM_STACKED_CONVS):
mapping.update({
'convs.%d.weight' % (i*2): 'head_conv%d_w' % (i+1),
'convs.%d.bias' % (i*2): 'head_conv%d_b' % (i+1)
})
mapping.update({
'fc.weight': 'fc6_w',
'fc.bias': 'fc6_b'
})
return mapping, []
def forward(self, x, rpn_ret):
x = self.roi_xform(
x, rpn_ret,
blob_rois='rois',
method=cfg.FAST_RCNN.ROI_XFORM_METHOD,
resolution=cfg.FAST_RCNN.ROI_XFORM_RESOLUTION,
spatial_scale=self.spatial_scale,
sampling_ratio=cfg.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO
)
batch_size = x.size(0)
x = self.convs(x)
x = F.relu(self.fc(x.view(batch_size, -1)), inplace=True)
return x
class roi_Xconv1fc_gn_head(nn.Module):
"""Add a X conv + 1fc head, with GroupNorm"""
def __init__(self, dim_in, roi_xform_func, spatial_scale):
super().__init__()
self.dim_in = dim_in
self.roi_xform = roi_xform_func
self.spatial_scale = spatial_scale
hidden_dim = cfg.FAST_RCNN.CONV_HEAD_DIM
module_list = []
for i in range(cfg.FAST_RCNN.NUM_STACKED_CONVS):
module_list.extend([
nn.Conv2d(dim_in, hidden_dim, 3, 1, 1, bias=False),
nn.GroupNorm(net_utils.get_group_gn(hidden_dim), hidden_dim,
eps=cfg.GROUP_NORM.EPSILON),
nn.ReLU(inplace=True)
])
dim_in = hidden_dim
self.convs = nn.Sequential(*module_list)
self.dim_out = fc_dim = cfg.FAST_RCNN.MLP_HEAD_DIM
roi_size = cfg.FAST_RCNN.ROI_XFORM_RESOLUTION
self.fc = nn.Linear(dim_in * roi_size * roi_size, fc_dim)
self._init_weights()
def _init_weights(self):
def _init(m):
if isinstance(m, nn.Conv2d):
mynn.init.MSRAFill(m.weight)
elif isinstance(m, nn.Linear):
mynn.init.XavierFill(m.weight)
init.constant_(m.bias, 0)
self.apply(_init)
def detectron_weight_mapping(self):
mapping = {}
for i in range(cfg.FAST_RCNN.NUM_STACKED_CONVS):
mapping.update({
'convs.%d.weight' % (i*3): 'head_conv%d_w' % (i+1),
'convs.%d.weight' % (i*3+1): 'head_conv%d_gn_s' % (i+1),
'convs.%d.bias' % (i*3+1): 'head_conv%d_gn_b' % (i+1)
})
mapping.update({
'fc.weight': 'fc6_w',
'fc.bias': 'fc6_b'
})
return mapping, []
def forward(self, x, rpn_ret):
x = self.roi_xform(
x, rpn_ret,
blob_rois='rois',
method=cfg.FAST_RCNN.ROI_XFORM_METHOD,
resolution=cfg.FAST_RCNN.ROI_XFORM_RESOLUTION,
spatial_scale=self.spatial_scale,
sampling_ratio=cfg.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO
)
batch_size = x.size(0)
x = self.convs(x)
x = F.relu(self.fc(x.view(batch_size, -1)), inplace=True)
return x
| ContrastiveLosses4VRD-master | lib/modeling_rel/fast_rcnn_heads.py |
"""
Some functions are adapted from Rowan Zellers:
https://github.com/rowanz/neural-motifs
"""
import os
import torch.nn as nn
import torch
from torch.autograd import Variable
import numpy as np
import logging
from six.moves import cPickle as pickle
from core.config import cfg
from modeling_rel.get_dataset_counts_rel import get_rel_counts
logger = logging.getLogger(__name__)
# This module is adapted from Rowan Zellers:
# https://github.com/rowanz/neural-motifs/blob/master/lib/sparse_targets.py
# Modified for this project
class FrequencyBias(nn.Module):
"""
The goal of this is to provide a simplified way of computing
P(predicate | obj1, obj2, img).
"""
def __init__(self, ds_name, eps=1e-3):
super(FrequencyBias, self).__init__()
if ds_name.find('vg') >= 0:
ds_name = 'vg'
elif ds_name.find('oi') >= 0:
ds_name = 'oi'
elif ds_name.find('vrd') >= 0:
ds_name = 'vrd'
else:
raise NotImplementedError
if cfg.MODEL.USE_OVLP_FILTER:
must_overlap = True
else:
must_overlap = False
fg_matrix, bg_matrix = get_rel_counts(ds_name, must_overlap=must_overlap)
bg_matrix += 1
fg_matrix[:, :, 0] = bg_matrix
pred_dist = np.log(fg_matrix / (fg_matrix.sum(2)[:, :, None] + 1e-08) + eps)
self.num_objs = pred_dist.shape[0]
pred_dist = torch.FloatTensor(pred_dist).view(-1, pred_dist.shape[2])
self.rel_baseline = nn.Embedding(pred_dist.size(0), pred_dist.size(1))
self.rel_baseline.weight.data = pred_dist
logger.info('Frequency bias tables loaded.')
def rel_index_with_labels(self, labels):
"""
:param labels: [batch_size, 2]
:return:
"""
return self.rel_baseline(labels[:, 0] * self.num_objs + labels[:, 1])
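# Illustrative usage sketch (not part of the original file): querying the
# frequency bias for a batch of (subject, object) label pairs. The labels are
# 0-based object class indices; the result holds log frequencies over the
# predicate classes and is typically added to the predicate logits.
def _example_query_frequency_bias(freq_bias, sbj_labels, obj_labels):
    """freq_bias: a FrequencyBias module; sbj/obj_labels: LongTensors of shape (N,)."""
    pair_labels = torch.stack((sbj_labels, obj_labels), dim=1)  # (N, 2)
    return freq_bias.rel_index_with_labels(pair_labels)         # (N, num predicates)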
| ContrastiveLosses4VRD-master | lib/modeling_rel/sparse_targets_rel.py |
# Written by Ji Zhang in 2019
import collections
import numpy as np
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from core.config import cfg
import utils.net as net_utils
import modeling.ResNet as ResNet
from modeling.generate_anchors import generate_anchors
from modeling.generate_proposals import GenerateProposalsOp
from modeling.collect_and_distribute_fpn_rpn_proposals import CollectAndDistributeFpnRpnProposalsOp
import nn as mynn
logger = logging.getLogger(__name__)
class rel_pyramid_module(nn.Module):
def __init__(self, num_backbone_stages):
super().__init__()
fpn_dim = cfg.FPN.DIM
self.num_backbone_stages = num_backbone_stages
self.prd_conv_lateral = nn.ModuleList()
for i in range(self.num_backbone_stages):
if cfg.FPN.USE_GN:
self.prd_conv_lateral.append(nn.Sequential(
nn.Conv2d(fpn_dim, fpn_dim, 1, 1, 0, bias=False),
nn.GroupNorm(net_utils.get_group_gn(fpn_dim), fpn_dim,
eps=cfg.GROUP_NORM.EPSILON)))
else:
self.prd_conv_lateral.append(nn.Conv2d(fpn_dim, fpn_dim, 1, 1, 0))
self.posthoc_modules = nn.ModuleList()
for i in range(self.num_backbone_stages):
if cfg.FPN.USE_GN:
self.posthoc_modules.append(nn.Sequential(
nn.Conv2d(fpn_dim, fpn_dim, 3, 1, 1, bias=False),
nn.GroupNorm(net_utils.get_group_gn(fpn_dim), fpn_dim,
eps=cfg.GROUP_NORM.EPSILON)))
else:
self.posthoc_modules.append(nn.Conv2d(fpn_dim, fpn_dim, 3, 1, 1))
self._init_weights()
def _init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
mynn.init.XavierFill(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, blob_conv):
# blob_conv is in the order (P5, P4, P3, P2)
rel_lateral_inner_blob = None
rel_lateral_output_blobs = []
for i in range(self.num_backbone_stages):
if rel_lateral_inner_blob is not None:
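                # Down-sample the previous (finer) level's output by 2x so it can
                # be added into the current lateral feature: the relation pyramid
                # is built bottom-up, starting from the finest backbone level.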
bu = F.max_pool2d(rel_lateral_inner_blob, 2, stride=2)
rel_lateral_inner_blob = \
self.prd_conv_lateral[i](blob_conv[-1 - i]) + bu
else:
rel_lateral_inner_blob = \
self.prd_conv_lateral[i](blob_conv[-1 - i])
rel_lateral_output_blobs.append(self.posthoc_modules[i](rel_lateral_inner_blob))
        # The output is in the order (P2, P3, P4, P5); reverse it back to (P5, P4, P3, P2)
rel_lateral_output_blobs.reverse()
return rel_lateral_output_blobs
| ContrastiveLosses4VRD-master | lib/modeling_rel/rel_pyramid_module.py |
# Adapted from Detectron.pytorch/lib/modeling/generate_proposal_labels.py
# for this project by Ji Zhang, 2019
from torch import nn
from core.config import cfg
from datasets_rel import json_dataset_rel
from roi_data_rel.fast_rcnn_rel import add_rel_blobs
class GenerateRelProposalLabelsOp(nn.Module):
def __init__(self):
super().__init__()
def forward(self, sbj_rois, obj_rois, det_rois, roidb, im_info):
im_scales = im_info.data.numpy()[:, 2]
# For historical consistency with the original Faster R-CNN
# implementation we are *not* filtering crowd proposals.
# This choice should be investigated in the future (it likely does
# not matter).
# Note: crowd_thresh=0 will ignore _filter_crowd_proposals
json_dataset_rel.add_rel_proposals(roidb, sbj_rois, obj_rois, det_rois, im_scales)
output_blob_names = ['sbj_rois', 'obj_rois', 'rel_rois', 'fg_prd_labels_int32', 'all_prd_labels_int32', 'fg_size']
if cfg.MODEL.USE_SPATIAL_FEAT:
output_blob_names += ['spt_feat']
if cfg.MODEL.USE_FREQ_BIAS:
output_blob_names += ['all_sbj_labels_int32']
output_blob_names += ['all_obj_labels_int32']
if cfg.MODEL.USE_NODE_CONTRASTIVE_LOSS or cfg.MODEL.USE_NODE_CONTRASTIVE_SO_AWARE_LOSS or cfg.MODEL.USE_NODE_CONTRASTIVE_P_AWARE_LOSS:
output_blob_names += ['binary_labels_sbj_pos_int32',
'sbj_rois_sbj_pos', 'obj_rois_sbj_pos', 'rel_rois_sbj_pos',
'spt_feat_sbj_pos',
'sbj_labels_sbj_pos_int32', 'obj_labels_sbj_pos_int32', 'prd_labels_sbj_pos_int32',
'sbj_labels_sbj_pos_fg_int32', 'obj_labels_sbj_pos_fg_int32',
'inds_unique_sbj_pos',
'inds_reverse_sbj_pos',
'binary_labels_obj_pos_int32',
'sbj_rois_obj_pos', 'obj_rois_obj_pos', 'rel_rois_obj_pos',
'spt_feat_obj_pos',
'sbj_labels_obj_pos_int32', 'obj_labels_obj_pos_int32', 'prd_labels_obj_pos_int32',
'sbj_labels_obj_pos_fg_int32', 'obj_labels_obj_pos_fg_int32',
'inds_unique_obj_pos',
'inds_reverse_obj_pos']
blobs = {k: [] for k in output_blob_names}
add_rel_blobs(blobs, im_scales, roidb)
return blobs
| ContrastiveLosses4VRD-master | lib/modeling_rel/generate_rel_proposal_labels.py |
ContrastiveLosses4VRD-master | lib/modeling_rel/__init__.py |
|
# Written by Ji Zhang in 2019
import numpy as np
from numpy import linalg as la
import json
import logging
from torch import nn
from torch.nn import init
import torch.nn.functional as F
from core.config import cfg
from modeling_rel.generate_rel_proposal_labels import GenerateRelProposalLabelsOp
import modeling.FPN as FPN
import utils_rel.boxes_rel as box_utils_rel
import utils.fpn as fpn_utils
logger = logging.getLogger(__name__)
def generic_relpn_outputs():
return single_scale_relpn_outputs()
class single_scale_relpn_outputs(nn.Module):
"""Add RelPN outputs to a single scale model (i.e., no FPN)."""
def __init__(self):
super().__init__()
self.RelPN_GenerateProposalLabels = GenerateRelProposalLabelsOp()
ds_name = cfg.TRAIN.DATASETS[0] if len(cfg.TRAIN.DATASETS) else cfg.TEST.DATASETS[0]
def get_roi_inds(self, det_labels, lbls):
lbl_set = np.array(lbls)
inds = np.where(np.isin(det_labels, lbl_set))[0]
return inds
def remove_self_pairs(self, det_size, sbj_inds, obj_inds):
mask = np.ones(sbj_inds.shape[0], dtype=bool)
for i in range(det_size):
mask[i + det_size * i] = False
keeps = np.where(mask)[0]
sbj_inds = sbj_inds[keeps]
obj_inds = obj_inds[keeps]
return sbj_inds, obj_inds
def forward(self, det_rois, det_labels, det_scores, im_info, dataset_name, roidb=None):
"""
det_rois: feature maps from the backbone network. (Variable)
im_info: (CPU Variable)
roidb: (list of ndarray)
"""
# Get pairwise proposals first
if roidb is not None:
# we always feed one image per batch during training
assert len(roidb) == 1
sbj_inds = np.repeat(np.arange(det_rois.shape[0]), det_rois.shape[0])
obj_inds = np.tile(np.arange(det_rois.shape[0]), det_rois.shape[0])
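        # np.repeat + np.tile enumerate the full Cartesian product of detections,
        # i.e. every ordered (subject, object) index pair; the (i, i) self pairs
        # are dropped just below.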
# remove self paired rois
if det_rois.shape[0] > 1: # no pairs to remove when there is at most one detection
sbj_inds, obj_inds = self.remove_self_pairs(det_rois.shape[0], sbj_inds, obj_inds)
sbj_rois = det_rois[sbj_inds]
obj_rois = det_rois[obj_inds]
im_scale = im_info.data.numpy()[:, 2][0]
sbj_boxes = sbj_rois[:, 1:] / im_scale
obj_boxes = obj_rois[:, 1:] / im_scale
# filters out those roi pairs whose boxes are not overlapping in the original scales
if cfg.MODEL.USE_OVLP_FILTER:
ovlp_so = box_utils_rel.bbox_pair_overlaps(
sbj_boxes.astype(dtype=np.float32, copy=False),
obj_boxes.astype(dtype=np.float32, copy=False))
ovlp_inds = np.where(ovlp_so > 0)[0]
sbj_inds = sbj_inds[ovlp_inds]
obj_inds = obj_inds[ovlp_inds]
sbj_rois = sbj_rois[ovlp_inds]
obj_rois = obj_rois[ovlp_inds]
sbj_boxes = sbj_boxes[ovlp_inds]
obj_boxes = obj_boxes[ovlp_inds]
return_dict = {}
if self.training:
# Add binary relationships
blobs_out = self.RelPN_GenerateProposalLabels(sbj_rois, obj_rois, det_rois, roidb, im_info)
return_dict.update(blobs_out)
else:
sbj_labels = det_labels[sbj_inds]
obj_labels = det_labels[obj_inds]
sbj_scores = det_scores[sbj_inds]
obj_scores = det_scores[obj_inds]
rel_rois = box_utils_rel.rois_union(sbj_rois, obj_rois)
return_dict['det_rois'] = det_rois
return_dict['sbj_inds'] = sbj_inds
return_dict['obj_inds'] = obj_inds
return_dict['sbj_rois'] = sbj_rois
return_dict['obj_rois'] = obj_rois
return_dict['rel_rois'] = rel_rois
return_dict['sbj_labels'] = sbj_labels
return_dict['obj_labels'] = obj_labels
return_dict['sbj_scores'] = sbj_scores
return_dict['obj_scores'] = obj_scores
return_dict['fg_size'] = np.array([sbj_rois.shape[0]], dtype=np.int32)
im_scale = im_info.data.numpy()[:, 2][0]
im_w = im_info.data.numpy()[:, 1][0]
im_h = im_info.data.numpy()[:, 0][0]
if cfg.MODEL.USE_SPATIAL_FEAT:
spt_feat = box_utils_rel.get_spt_features(sbj_boxes, obj_boxes, im_w, im_h)
return_dict['spt_feat'] = spt_feat
if cfg.MODEL.USE_FREQ_BIAS or cfg.MODEL.RUN_BASELINE:
return_dict['all_sbj_labels_int32'] = sbj_labels.astype(np.int32, copy=False) - 1 # det_labels start from 1
return_dict['all_obj_labels_int32'] = obj_labels.astype(np.int32, copy=False) - 1 # det_labels start from 1
if cfg.FPN.FPN_ON and cfg.FPN.MULTILEVEL_ROIS:
lvl_min = cfg.FPN.ROI_MIN_LEVEL
lvl_max = cfg.FPN.ROI_MAX_LEVEL
            # When min_rel_area is used, the same sbj/obj area can be mapped to different feature
            # levels when it is associated with different relationships, so we cannot pool det_rois
            # features once and then gather sbj/obj features from them; the only way is to gather
            # sbj/obj features per relationship, which is why sbj_rois/obj_rois are returned.
rois_blob_names = ['det_rois', 'rel_rois']
for rois_blob_name in rois_blob_names:
# Add per FPN level roi blobs named like: <rois_blob_name>_fpn<lvl>
target_lvls = fpn_utils.map_rois_to_fpn_levels(
return_dict[rois_blob_name][:, 1:5], lvl_min, lvl_max)
fpn_utils.add_multilevel_roi_blobs(
return_dict, rois_blob_name, return_dict[rois_blob_name], target_lvls,
lvl_min, lvl_max)
return return_dict
| ContrastiveLosses4VRD-master | lib/modeling_rel/relpn_heads.py |
# Written by Ji Zhang in 2019
import os
import numpy as np
import logging
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
from core.config import cfg
import nn as mynn
import torchvision.models as models
logger = logging.getLogger(__name__)
# ---------------------------------------------------------------------------- #
# VGG16 architecture
# ---------------------------------------------------------------------------- #
vgg = models.vgg16()
if cfg.VGG16.IMAGENET_PRETRAINED_WEIGHTS != '':
logger.info("Loading imagenet pretrained weights from %s", cfg.VGG16.IMAGENET_PRETRAINED_WEIGHTS)
state_dict = torch.load(cfg.VGG16.IMAGENET_PRETRAINED_WEIGHTS)
vgg.load_state_dict({k:v for k, v in state_dict.items() if k in vgg.state_dict()})
class VGG16_conv_body(nn.Module):
def __init__(self):
super().__init__()
self.num_layers = 16
self.spatial_scale = 1. / 16. # final feature scale wrt. original image scale
self.dim_out = 512
self._init_modules()
def _init_modules(self):
# not using the last maxpool layer
self.convs = nn.Sequential(*list(vgg.features._modules.values())[:-1])
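        # freeze the first 10 feature layers (the conv1_* and conv2_* blocks), as in the usual
        # Faster R-CNN fine-tuning recipe for VGG16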
for layer in range(10):
            for p in self.convs[layer].parameters():
                p.requires_grad = False
def forward(self, x):
return self.convs(x)
class VGG16_roi_conv5_head(nn.Module):
def __init__(self, dim_in, roi_xform_func, spatial_scale):
super().__init__()
self.roi_xform = roi_xform_func
self.spatial_scale = spatial_scale
self.dim_out = 4096
self.dim_roi_out = dim_in # 512
self._init_modules()
def _init_modules(self):
self.heads = nn.Sequential(*list(vgg.classifier._modules.values())[:-1])
def forward(self, x, rpn_ret, rois_name='rois', use_relu=True):
x = self.roi_xform(
x, rpn_ret,
blob_rois=rois_name,
method=cfg.FAST_RCNN.ROI_XFORM_METHOD,
resolution=7,
spatial_scale=self.spatial_scale,
sampling_ratio=cfg.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO
)
feat = x.view(x.size(0), -1)
if use_relu:
for layer in list(self.heads.children()):
feat = layer(feat)
else:
            # do not use the last Dropout and ReLU in fc7 (kept the same as in Rowan's paper)
for layer in list(self.heads.children())[:-2]:
feat = layer(feat)
return feat
| ContrastiveLosses4VRD-master | lib/modeling_rel/VGG16.py |
# Written by Ji Zhang in 2019
import numpy as np
from numpy import linalg as la
import math
import logging
import json
import torch
from torch import nn
from torch.nn import init
import torch.nn.functional as F
from torch.autograd import Variable
import nn as mynn
from core.config import cfg
from modeling_rel.sparse_targets_rel import FrequencyBias
logger = logging.getLogger(__name__)
class reldn_head(nn.Module):
def __init__(self, dim_in):
super().__init__()
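        # dim_in is the concatenated subject + relation (union) + object feature size
        # (3 x Box_Head.dim_out), so a single branch's feature size is dim_in // 3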
dim_in_final = dim_in // 3
self.dim_in_final = dim_in_final
if cfg.MODEL.USE_BG:
num_prd_classes = cfg.MODEL.NUM_PRD_CLASSES + 1
else:
num_prd_classes = cfg.MODEL.NUM_PRD_CLASSES
if cfg.MODEL.RUN_BASELINE:
# only run it on testing mode
self.freq_bias = FrequencyBias(cfg.TEST.DATASETS[0])
return
self.prd_cls_feats = nn.Sequential(
nn.Linear(dim_in, dim_in // 2),
nn.LeakyReLU(0.1),
nn.Linear(dim_in // 2, dim_in_final),
nn.LeakyReLU(0.1))
self.prd_cls_scores = nn.Linear(dim_in_final, num_prd_classes)
if cfg.MODEL.USE_FREQ_BIAS:
# Assume we are training/testing on only one dataset
if len(cfg.TRAIN.DATASETS):
self.freq_bias = FrequencyBias(cfg.TRAIN.DATASETS[0])
else:
self.freq_bias = FrequencyBias(cfg.TEST.DATASETS[0])
if cfg.MODEL.USE_SPATIAL_FEAT:
self.spt_cls_feats = nn.Sequential(
nn.Linear(28, 64),
nn.LeakyReLU(0.1),
nn.Linear(64, 64),
nn.LeakyReLU(0.1))
self.spt_cls_scores = nn.Linear(64, num_prd_classes)
if cfg.MODEL.ADD_SO_SCORES:
self.prd_sbj_scores = nn.Linear(dim_in_final, num_prd_classes)
self.prd_obj_scores = nn.Linear(dim_in_final, num_prd_classes)
self._init_weights()
def _init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
# nn.init.kaiming_normal_(m.weight, mode='fan_out')
mynn.init.XavierFill(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# spo_feat will be concatenation of SPO
def forward(self, spo_feat, spt_feat=None, sbj_labels=None, obj_labels=None, sbj_feat=None, obj_feat=None):
device_id = spo_feat.get_device()
if sbj_labels is not None:
sbj_labels = Variable(torch.from_numpy(sbj_labels.astype('int64'))).cuda(device_id)
if obj_labels is not None:
obj_labels = Variable(torch.from_numpy(obj_labels.astype('int64'))).cuda(device_id)
if cfg.MODEL.RUN_BASELINE:
assert sbj_labels is not None and obj_labels is not None
prd_cls_scores = self.freq_bias.rel_index_with_labels(torch.stack((sbj_labels, obj_labels), 1))
prd_cls_scores = F.softmax(prd_cls_scores, dim=1)
return prd_cls_scores, prd_cls_scores, None, prd_cls_scores, None, None
if spo_feat.dim() == 4:
spo_feat = spo_feat.squeeze(3).squeeze(2)
prd_cls_feats = self.prd_cls_feats(spo_feat)
prd_vis_scores = self.prd_cls_scores(prd_cls_feats)
sbj_cls_scores = None
obj_cls_scores = None
if cfg.MODEL.USE_FREQ_BIAS:
assert sbj_labels is not None and obj_labels is not None
prd_bias_scores = self.freq_bias.rel_index_with_labels(torch.stack((sbj_labels, obj_labels), 1))
if cfg.MODEL.USE_SPATIAL_FEAT:
assert spt_feat is not None
device_id = spo_feat.get_device()
spt_feat = Variable(torch.from_numpy(spt_feat.astype('float32'))).cuda(device_id)
spt_cls_feats = self.spt_cls_feats(spt_feat)
prd_spt_scores = self.spt_cls_scores(spt_cls_feats)
else:
prd_spt_scores = None
if cfg.MODEL.ADD_SO_SCORES:
prd_sbj_scores = self.prd_sbj_scores(sbj_feat)
prd_obj_scores = self.prd_obj_scores(obj_feat)
if cfg.MODEL.ADD_SCORES_ALL:
            ttl_cls_scores = prd_vis_scores.clone()  # clone() keeps the autograd graph; torch.tensor() would detach the visual scores
if cfg.MODEL.USE_FREQ_BIAS:
ttl_cls_scores += prd_bias_scores
if cfg.MODEL.USE_SPATIAL_FEAT:
ttl_cls_scores += prd_spt_scores
if cfg.MODEL.ADD_SO_SCORES:
ttl_cls_scores += prd_sbj_scores + prd_obj_scores
else:
ttl_cls_scores = None
if not self.training:
prd_vis_scores = F.softmax(prd_vis_scores, dim=1)
if cfg.MODEL.USE_FREQ_BIAS:
prd_bias_scores = F.softmax(prd_bias_scores, dim=1)
if cfg.MODEL.USE_SPATIAL_FEAT:
prd_spt_scores = F.softmax(prd_spt_scores, dim=1)
if cfg.MODEL.ADD_SCORES_ALL:
ttl_cls_scores = F.softmax(ttl_cls_scores, dim=1)
return prd_vis_scores, prd_bias_scores, prd_spt_scores, ttl_cls_scores, sbj_cls_scores, obj_cls_scores
def reldn_losses(prd_cls_scores, prd_labels_int32, fg_only=False):
device_id = prd_cls_scores.get_device()
prd_labels = Variable(torch.from_numpy(prd_labels_int32.astype('int64'))).cuda(device_id)
loss_cls_prd = F.cross_entropy(prd_cls_scores, prd_labels)
# class accuracy
prd_cls_preds = prd_cls_scores.max(dim=1)[1].type_as(prd_labels)
accuracy_cls_prd = prd_cls_preds.eq(prd_labels).float().mean(dim=0)
return loss_cls_prd, accuracy_cls_prd
def reldn_contrastive_losses(prd_scores_sbj_pos, prd_scores_obj_pos, rel_ret):
# sbj
prd_probs_sbj_pos = F.softmax(prd_scores_sbj_pos, dim=1)
sbj_pair_pos_batch, sbj_pair_neg_batch, sbj_target = split_pos_neg_spo_agnostic(
prd_probs_sbj_pos, rel_ret['binary_labels_sbj_pos_int32'], rel_ret['inds_unique_sbj_pos'], rel_ret['inds_reverse_sbj_pos'])
sbj_contrastive_loss = F.margin_ranking_loss(sbj_pair_pos_batch, sbj_pair_neg_batch, sbj_target, margin=cfg.MODEL.NODE_CONTRASTIVE_MARGIN)
# obj
prd_probs_obj_pos = F.softmax(prd_scores_obj_pos, dim=1)
obj_pair_pos_batch, obj_pair_neg_batch, obj_target = split_pos_neg_spo_agnostic(
prd_probs_obj_pos, rel_ret['binary_labels_obj_pos_int32'], rel_ret['inds_unique_obj_pos'], rel_ret['inds_reverse_obj_pos'])
obj_contrastive_loss = F.margin_ranking_loss(obj_pair_pos_batch, obj_pair_neg_batch, obj_target, margin=cfg.MODEL.NODE_CONTRASTIVE_MARGIN)
return sbj_contrastive_loss, obj_contrastive_loss
def reldn_so_contrastive_losses(prd_scores_sbj_pos, prd_scores_obj_pos, rel_ret):
# sbj
prd_probs_sbj_pos = F.softmax(prd_scores_sbj_pos, dim=1)
sbj_pair_pos_batch, sbj_pair_neg_batch, sbj_target = split_pos_neg_so_aware(
prd_probs_sbj_pos,
rel_ret['binary_labels_sbj_pos_int32'], rel_ret['inds_unique_sbj_pos'], rel_ret['inds_reverse_sbj_pos'],
rel_ret['sbj_labels_sbj_pos_int32'], rel_ret['obj_labels_sbj_pos_int32'], 's')
sbj_so_contrastive_loss = F.margin_ranking_loss(sbj_pair_pos_batch, sbj_pair_neg_batch, sbj_target, margin=cfg.MODEL.NODE_CONTRASTIVE_SO_AWARE_MARGIN)
# obj
prd_probs_obj_pos = F.softmax(prd_scores_obj_pos, dim=1)
obj_pair_pos_batch, obj_pair_neg_batch, obj_target = split_pos_neg_so_aware(
prd_probs_obj_pos,
rel_ret['binary_labels_obj_pos_int32'], rel_ret['inds_unique_obj_pos'], rel_ret['inds_reverse_obj_pos'],
rel_ret['sbj_labels_obj_pos_int32'], rel_ret['obj_labels_obj_pos_int32'], 'o')
obj_so_contrastive_loss = F.margin_ranking_loss(obj_pair_pos_batch, obj_pair_neg_batch, obj_target, margin=cfg.MODEL.NODE_CONTRASTIVE_SO_AWARE_MARGIN)
return sbj_so_contrastive_loss, obj_so_contrastive_loss
def reldn_p_contrastive_losses(prd_scores_sbj_pos, prd_scores_obj_pos, prd_bias_scores_sbj_pos, prd_bias_scores_obj_pos, rel_ret):
# sbj
prd_probs_sbj_pos = F.softmax(prd_scores_sbj_pos, dim=1)
prd_bias_probs_sbj_pos = F.softmax(prd_bias_scores_sbj_pos, dim=1)
sbj_pair_pos_batch, sbj_pair_neg_batch, sbj_target = split_pos_neg_p_aware(
prd_probs_sbj_pos,
prd_bias_probs_sbj_pos,
rel_ret['binary_labels_sbj_pos_int32'], rel_ret['inds_unique_sbj_pos'], rel_ret['inds_reverse_sbj_pos'],
rel_ret['prd_labels_sbj_pos_int32'])
sbj_p_contrastive_loss = F.margin_ranking_loss(sbj_pair_pos_batch, sbj_pair_neg_batch, sbj_target, margin=cfg.MODEL.NODE_CONTRASTIVE_P_AWARE_MARGIN)
# obj
prd_probs_obj_pos = F.softmax(prd_scores_obj_pos, dim=1)
prd_bias_probs_obj_pos = F.softmax(prd_bias_scores_obj_pos, dim=1)
obj_pair_pos_batch, obj_pair_neg_batch, obj_target = split_pos_neg_p_aware(
prd_probs_obj_pos,
prd_bias_probs_obj_pos,
rel_ret['binary_labels_obj_pos_int32'], rel_ret['inds_unique_obj_pos'], rel_ret['inds_reverse_obj_pos'],
rel_ret['prd_labels_obj_pos_int32'])
obj_p_contrastive_loss = F.margin_ranking_loss(obj_pair_pos_batch, obj_pair_neg_batch, obj_target, margin=cfg.MODEL.NODE_CONTRASTIVE_P_AWARE_MARGIN)
return sbj_p_contrastive_loss, obj_p_contrastive_loss
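# The split_pos_neg_* helpers below implement the hardest-positive / hardest-negative selection
# for the contrastive (margin ranking) losses: relation candidates are grouped by the shared
# entity via inds_reverse_pos, and within each group the lowest "relatedness" (1 - background
# probability) among positives is pushed above the highest one among negatives.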
def split_pos_neg_spo_agnostic(prd_probs, binary_labels_pos, inds_unique_pos, inds_reverse_pos):
device_id = prd_probs.get_device()
prd_pos_probs = 1 - prd_probs[:, 0] # shape is (#rels,)
# loop over each group
pair_pos_batch = torch.ones(1).cuda(device_id) # a dummy sample in the batch in case there is no real sample
pair_neg_batch = torch.zeros(1).cuda(device_id) # a dummy sample in the batch in case there is no real sample
for i in range(inds_unique_pos.shape[0]):
inds = np.where(inds_reverse_pos == i)[0]
prd_pos_probs_i = prd_pos_probs[inds]
binary_labels_pos_i = binary_labels_pos[inds]
pair_pos_inds = np.where(binary_labels_pos_i > 0)[0]
pair_neg_inds = np.where(binary_labels_pos_i == 0)[0]
if pair_pos_inds.size == 0 or pair_neg_inds.size == 0: # ignore this node if either pos or neg does not exist
continue
prd_pos_probs_i_pair_pos = prd_pos_probs_i[pair_pos_inds]
prd_pos_probs_i_pair_neg = prd_pos_probs_i[pair_neg_inds]
min_prd_pos_probs_i_pair_pos = torch.min(prd_pos_probs_i_pair_pos)
max_prd_pos_probs_i_pair_neg = torch.max(prd_pos_probs_i_pair_neg)
pair_pos_batch = torch.cat((pair_pos_batch, min_prd_pos_probs_i_pair_pos.unsqueeze(0)))
pair_neg_batch = torch.cat((pair_neg_batch, max_prd_pos_probs_i_pair_neg.unsqueeze(0)))
target = torch.ones_like(pair_pos_batch).cuda(device_id)
return pair_pos_batch, pair_neg_batch, target
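# Same grouping as above, but positives and negatives are additionally matched on the class of
# the other entity in the pair (object classes when s_or_o == 's', subject classes otherwise).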
def split_pos_neg_so_aware(prd_probs, binary_labels_pos, inds_unique_pos, inds_reverse_pos, sbj_labels_pos, obj_labels_pos, s_or_o):
device_id = prd_probs.get_device()
prd_pos_probs = 1 - prd_probs[:, 0] # shape is (#rels,)
# loop over each group
pair_pos_batch = torch.ones(1).cuda(device_id) # a dummy sample in the batch in case there is no real sample
pair_neg_batch = torch.zeros(1).cuda(device_id) # a dummy sample in the batch in case there is no real sample
for i in range(inds_unique_pos.shape[0]):
inds = np.where(inds_reverse_pos == i)[0]
prd_pos_probs_i = prd_pos_probs[inds]
binary_labels_pos_i = binary_labels_pos[inds]
sbj_labels_pos_i = sbj_labels_pos[inds]
obj_labels_pos_i = obj_labels_pos[inds]
pair_pos_inds = np.where(binary_labels_pos_i > 0)[0]
pair_neg_inds = np.where(binary_labels_pos_i == 0)[0]
if pair_pos_inds.size == 0 or pair_neg_inds.size == 0: # ignore this node if either pos or neg does not exist
continue
prd_pos_probs_i_pair_pos = prd_pos_probs_i[pair_pos_inds]
prd_pos_probs_i_pair_neg = prd_pos_probs_i[pair_neg_inds]
sbj_labels_i_pair_pos = sbj_labels_pos_i[pair_pos_inds]
obj_labels_i_pair_pos = obj_labels_pos_i[pair_pos_inds]
sbj_labels_i_pair_neg = sbj_labels_pos_i[pair_neg_inds]
obj_labels_i_pair_neg = obj_labels_pos_i[pair_neg_inds]
max_prd_pos_probs_i_pair_neg = torch.max(prd_pos_probs_i_pair_neg) # this is fixed for a given i
if s_or_o == 's':
# get all unique object labels
unique_obj_labels, inds_unique_obj_labels, inds_reverse_obj_labels = np.unique(
obj_labels_i_pair_pos, return_index=True, return_inverse=True, axis=0)
for j in range(inds_unique_obj_labels.shape[0]):
# get min pos
inds_j = np.where(inds_reverse_obj_labels == j)[0]
prd_pos_probs_i_pos_j = prd_pos_probs_i_pair_pos[inds_j]
min_prd_pos_probs_i_pos_j = torch.min(prd_pos_probs_i_pos_j)
# get max neg
neg_j_inds = np.where(obj_labels_i_pair_neg == unique_obj_labels[j])[0]
if neg_j_inds.size == 0:
if cfg.MODEL.USE_SPO_AGNOSTIC_COMPENSATION:
pair_pos_batch = torch.cat((pair_pos_batch, min_prd_pos_probs_i_pos_j.unsqueeze(0)))
pair_neg_batch = torch.cat((pair_neg_batch, max_prd_pos_probs_i_pair_neg.unsqueeze(0)))
continue
prd_pos_probs_i_neg_j = prd_pos_probs_i_pair_neg[neg_j_inds]
max_prd_pos_probs_i_neg_j = torch.max(prd_pos_probs_i_neg_j)
pair_pos_batch = torch.cat((pair_pos_batch, min_prd_pos_probs_i_pos_j.unsqueeze(0)))
pair_neg_batch = torch.cat((pair_neg_batch, max_prd_pos_probs_i_neg_j.unsqueeze(0)))
else:
# get all unique subject labels
unique_sbj_labels, inds_unique_sbj_labels, inds_reverse_sbj_labels = np.unique(
sbj_labels_i_pair_pos, return_index=True, return_inverse=True, axis=0)
for j in range(inds_unique_sbj_labels.shape[0]):
# get min pos
inds_j = np.where(inds_reverse_sbj_labels == j)[0]
prd_pos_probs_i_pos_j = prd_pos_probs_i_pair_pos[inds_j]
min_prd_pos_probs_i_pos_j = torch.min(prd_pos_probs_i_pos_j)
# get max neg
neg_j_inds = np.where(sbj_labels_i_pair_neg == unique_sbj_labels[j])[0]
if neg_j_inds.size == 0:
if cfg.MODEL.USE_SPO_AGNOSTIC_COMPENSATION:
pair_pos_batch = torch.cat((pair_pos_batch, min_prd_pos_probs_i_pos_j.unsqueeze(0)))
pair_neg_batch = torch.cat((pair_neg_batch, max_prd_pos_probs_i_pair_neg.unsqueeze(0)))
continue
prd_pos_probs_i_neg_j = prd_pos_probs_i_pair_neg[neg_j_inds]
max_prd_pos_probs_i_neg_j = torch.max(prd_pos_probs_i_neg_j)
pair_pos_batch = torch.cat((pair_pos_batch, min_prd_pos_probs_i_pos_j.unsqueeze(0)))
pair_neg_batch = torch.cat((pair_neg_batch, max_prd_pos_probs_i_neg_j.unsqueeze(0)))
target = torch.ones_like(pair_pos_batch).cuda(device_id)
return pair_pos_batch, pair_neg_batch, target
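# Same grouping again, but positives are matched with negatives predicted to have the same
# predicate class (ground-truth predicates for positives, argmax over non-background scores
# for negatives).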
def split_pos_neg_p_aware(prd_probs, prd_bias_probs, binary_labels_pos, inds_unique_pos, inds_reverse_pos, prd_labels_pos):
device_id = prd_probs.get_device()
prd_pos_probs = 1 - prd_probs[:, 0] # shape is (#rels,)
    prd_labels_det = prd_probs[:, 1:].argmax(dim=1).data.cpu().numpy() + 1  # prd_probs is a torch.tensor; excluding background
# loop over each group
pair_pos_batch = torch.ones(1).cuda(device_id) # a dummy sample in the batch in case there is no real sample
pair_neg_batch = torch.zeros(1).cuda(device_id) # a dummy sample in the batch in case there is no real sample
for i in range(inds_unique_pos.shape[0]):
inds = np.where(inds_reverse_pos == i)[0]
prd_pos_probs_i = prd_pos_probs[inds]
prd_labels_pos_i = prd_labels_pos[inds]
prd_labels_det_i = prd_labels_det[inds]
binary_labels_pos_i = binary_labels_pos[inds]
pair_pos_inds = np.where(binary_labels_pos_i > 0)[0]
pair_neg_inds = np.where(binary_labels_pos_i == 0)[0]
if pair_pos_inds.size == 0 or pair_neg_inds.size == 0: # ignore this node if either pos or neg does not exist
continue
prd_pos_probs_i_pair_pos = prd_pos_probs_i[pair_pos_inds]
prd_pos_probs_i_pair_neg = prd_pos_probs_i[pair_neg_inds]
prd_labels_i_pair_pos = prd_labels_pos_i[pair_pos_inds]
prd_labels_i_pair_neg = prd_labels_det_i[pair_neg_inds]
max_prd_pos_probs_i_pair_neg = torch.max(prd_pos_probs_i_pair_neg) # this is fixed for a given i
unique_prd_labels, inds_unique_prd_labels, inds_reverse_prd_labels = np.unique(
prd_labels_i_pair_pos, return_index=True, return_inverse=True, axis=0)
for j in range(inds_unique_prd_labels.shape[0]):
# get min pos
inds_j = np.where(inds_reverse_prd_labels == j)[0]
prd_pos_probs_i_pos_j = prd_pos_probs_i_pair_pos[inds_j]
min_prd_pos_probs_i_pos_j = torch.min(prd_pos_probs_i_pos_j)
# get max neg
neg_j_inds = np.where(prd_labels_i_pair_neg == unique_prd_labels[j])[0]
if neg_j_inds.size == 0:
if cfg.MODEL.USE_SPO_AGNOSTIC_COMPENSATION:
pair_pos_batch = torch.cat((pair_pos_batch, min_prd_pos_probs_i_pos_j.unsqueeze(0)))
pair_neg_batch = torch.cat((pair_neg_batch, max_prd_pos_probs_i_pair_neg.unsqueeze(0)))
continue
prd_pos_probs_i_neg_j = prd_pos_probs_i_pair_neg[neg_j_inds]
max_prd_pos_probs_i_neg_j = torch.max(prd_pos_probs_i_neg_j)
pair_pos_batch = torch.cat((pair_pos_batch, min_prd_pos_probs_i_pos_j.unsqueeze(0)))
pair_neg_batch = torch.cat((pair_neg_batch, max_prd_pos_probs_i_neg_j.unsqueeze(0)))
target = torch.ones_like(pair_pos_batch).cuda(device_id)
return pair_pos_batch, pair_neg_batch, target
| ContrastiveLosses4VRD-master | lib/modeling_rel/reldn_heads.py |
# Adapted from Detectron.pytorch/lib/modeling/model_builder.py
# for this project by Ji Zhang, 2019
from functools import wraps
import importlib
import logging
import numpy as np
import copy
import json
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from core.config import cfg
from model.roi_pooling.functions.roi_pool import RoIPoolFunction
from model.roi_crop.functions.roi_crop import RoICropFunction
from modeling.roi_xfrom.roi_align.functions.roi_align import RoIAlignFunction
import modeling.rpn_heads as rpn_heads
import modeling_rel.fast_rcnn_heads as fast_rcnn_heads
import modeling_rel.relpn_heads as relpn_heads
import modeling_rel.reldn_heads as reldn_heads
import modeling_rel.rel_pyramid_module as rel_pyramid_module
import utils_rel.boxes_rel as box_utils_rel
import utils.boxes as box_utils
import utils.blob as blob_utils
import utils_rel.net_rel as net_utils_rel
import utils.net as net_utils  # affine_grid_gen, used by roi_feature_transform's 'RoICrop' path
from utils.timer import Timer
import utils.resnet_weights_helper as resnet_utils
import utils.fpn as fpn_utils
logger = logging.getLogger(__name__)
def get_func(func_name):
"""Helper to return a function object by name. func_name must identify a
function in this module or the path to a function relative to the base
    'modeling' (or, for the VGG / roi_2mlp heads, 'modeling_rel') module.
"""
if func_name == '':
return None
try:
        # these two keywords mean we need to use the functions from the modeling_rel directory
if func_name.find('VGG') >= 0 or func_name.find('roi_2mlp_head') >= 0:
dir_name = 'modeling_rel.'
else:
dir_name = 'modeling.'
parts = func_name.split('.')
# Refers to a function in this module
if len(parts) == 1:
return globals()[parts[0]]
# Otherwise, assume we're referencing a module under modeling
module_name = dir_name + '.'.join(parts[:-1])
module = importlib.import_module(module_name)
return getattr(module, parts[-1])
except Exception:
logger.error('Failed to find function: %s', func_name)
raise
def check_inference(net_func):
@wraps(net_func)
def wrapper(self, *args, **kwargs):
if not self.training:
if cfg.PYTORCH_VERSION_LESS_THAN_040:
return net_func(self, *args, **kwargs)
else:
with torch.no_grad():
return net_func(self, *args, **kwargs)
else:
            raise ValueError('You should call this function only on inference. '
                             'Set the network in inference mode by net.eval().')
return wrapper
class Generalized_RCNN(nn.Module):
def __init__(self):
super().__init__()
# For cache
self.mapping_to_detectron = None
self.orphans_in_detectron = None
# Backbone for feature extraction
self.Conv_Body = get_func(cfg.MODEL.CONV_BODY)()
# Region Proposal Network
if cfg.RPN.RPN_ON:
self.RPN = rpn_heads.generic_rpn_outputs(
self.Conv_Body.dim_out, self.Conv_Body.spatial_scale)
if cfg.FPN.FPN_ON:
# Only supports case when RPN and ROI min levels are the same
assert cfg.FPN.RPN_MIN_LEVEL == cfg.FPN.ROI_MIN_LEVEL
# RPN max level can be >= to ROI max level
assert cfg.FPN.RPN_MAX_LEVEL >= cfg.FPN.ROI_MAX_LEVEL
# FPN RPN max level might be > FPN ROI max level in which case we
# need to discard some leading conv blobs (blobs are ordered from
# max/coarsest level to min/finest level)
self.num_roi_levels = cfg.FPN.ROI_MAX_LEVEL - cfg.FPN.ROI_MIN_LEVEL + 1
# Retain only the spatial scales that will be used for RoI heads. `Conv_Body.spatial_scale`
# may include extra scales that are used for RPN proposals, but not for RoI heads.
self.Conv_Body.spatial_scale = self.Conv_Body.spatial_scale[-self.num_roi_levels:]
# BBOX Branch
self.Box_Head = get_func(cfg.FAST_RCNN.ROI_BOX_HEAD)(
self.RPN.dim_out, self.roi_feature_transform, self.Conv_Body.spatial_scale)
self.Box_Outs = fast_rcnn_heads.fast_rcnn_outputs(
self.Box_Head.dim_out)
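        # The predicate branch is a deep copy of the whole detection model taken at this point;
        # its RPN and box output layers are deleted right below, since only its backbone and box
        # head are used to extract features for the union (relation) regions.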
self.Prd_RCNN = copy.deepcopy(self)
del self.Prd_RCNN.RPN
del self.Prd_RCNN.Box_Outs
# rel pyramid connection
if cfg.MODEL.USE_REL_PYRAMID:
assert cfg.FPN.FPN_ON
self.RelPyramid = rel_pyramid_module.rel_pyramid_module(self.num_roi_levels)
# RelPN
self.RelPN = relpn_heads.generic_relpn_outputs()
# RelDN
self.RelDN = reldn_heads.reldn_head(self.Box_Head.dim_out * 3)
self._init_modules()
        # initialize the S/O branches AFTER _init_modules() so that loaded pretrained weights are automatically copied into them
if cfg.MODEL.ADD_SO_SCORES:
self.S_Head = copy.deepcopy(self.Box_Head)
self.O_Head = copy.deepcopy(self.Box_Head)
for p in self.S_Head.parameters():
p.requires_grad = True
for p in self.O_Head.parameters():
p.requires_grad = True
def _init_modules(self):
# VGG16 imagenet pretrained model is initialized in VGG16.py
if cfg.RESNETS.IMAGENET_PRETRAINED_WEIGHTS != '':
logger.info("Loading pretrained weights from %s", cfg.RESNETS.IMAGENET_PRETRAINED_WEIGHTS)
resnet_utils.load_pretrained_imagenet_weights(self)
for p in self.Conv_Body.parameters():
p.requires_grad = False
if cfg.RESNETS.VRD_PRETRAINED_WEIGHTS != '':
self.load_detector_weights(cfg.RESNETS.VRD_PRETRAINED_WEIGHTS)
if cfg.VGG16.VRD_PRETRAINED_WEIGHTS != '':
self.load_detector_weights(cfg.VGG16.VRD_PRETRAINED_WEIGHTS)
if cfg.RESNETS.VG_PRETRAINED_WEIGHTS != '':
self.load_detector_weights(cfg.RESNETS.VG_PRETRAINED_WEIGHTS)
if cfg.VGG16.VG_PRETRAINED_WEIGHTS != '':
self.load_detector_weights(cfg.VGG16.VG_PRETRAINED_WEIGHTS)
if cfg.RESNETS.OI_REL_PRETRAINED_WEIGHTS != '':
self.load_detector_weights(cfg.RESNETS.OI_REL_PRETRAINED_WEIGHTS)
if cfg.VGG16.OI_REL_PRETRAINED_WEIGHTS != '':
self.load_detector_weights(cfg.VGG16.OI_REL_PRETRAINED_WEIGHTS)
if cfg.RESNETS.VRD_PRD_PRETRAINED_WEIGHTS != '' or cfg.VGG16.VRD_PRD_PRETRAINED_WEIGHTS != '' or \
cfg.RESNETS.VG_PRD_PRETRAINED_WEIGHTS != '' or cfg.VGG16.VG_PRD_PRETRAINED_WEIGHTS != '' or \
cfg.RESNETS.OI_REL_PRD_PRETRAINED_WEIGHTS != '' or cfg.VGG16.OI_REL_PRD_PRETRAINED_WEIGHTS != '':
if cfg.RESNETS.VRD_PRD_PRETRAINED_WEIGHTS != '':
logger.info("loading prd pretrained weights from %s", cfg.RESNETS.VRD_PRD_PRETRAINED_WEIGHTS)
checkpoint = torch.load(cfg.RESNETS.VRD_PRD_PRETRAINED_WEIGHTS, map_location=lambda storage, loc: storage)
if cfg.VGG16.VRD_PRD_PRETRAINED_WEIGHTS != '':
logger.info("loading prd pretrained weights from %s", cfg.VGG16.VRD_PRD_PRETRAINED_WEIGHTS)
checkpoint = torch.load(cfg.VGG16.VRD_PRD_PRETRAINED_WEIGHTS, map_location=lambda storage, loc: storage)
if cfg.RESNETS.VG_PRD_PRETRAINED_WEIGHTS != '':
logger.info("loading prd pretrained weights from %s", cfg.RESNETS.VG_PRD_PRETRAINED_WEIGHTS)
checkpoint = torch.load(cfg.RESNETS.VG_PRD_PRETRAINED_WEIGHTS, map_location=lambda storage, loc: storage)
if cfg.VGG16.VG_PRD_PRETRAINED_WEIGHTS != '':
logger.info("loading prd pretrained weights from %s", cfg.VGG16.VG_PRD_PRETRAINED_WEIGHTS)
checkpoint = torch.load(cfg.VGG16.VG_PRD_PRETRAINED_WEIGHTS, map_location=lambda storage, loc: storage)
if cfg.RESNETS.OI_REL_PRD_PRETRAINED_WEIGHTS != '':
logger.info("loading prd pretrained weights from %s", cfg.RESNETS.OI_REL_PRD_PRETRAINED_WEIGHTS)
checkpoint = torch.load(cfg.RESNETS.OI_REL_PRD_PRETRAINED_WEIGHTS, map_location=lambda storage, loc: storage)
if cfg.VGG16.OI_REL_PRD_PRETRAINED_WEIGHTS != '':
logger.info("loading prd pretrained weights from %s", cfg.VGG16.OI_REL_PRD_PRETRAINED_WEIGHTS)
checkpoint = torch.load(cfg.VGG16.OI_REL_PRD_PRETRAINED_WEIGHTS, map_location=lambda storage, loc: storage)
            # drop the detector's final classification and box-regression layers; they are not reused here
del checkpoint['model']['Box_Outs.cls_score.weight']
del checkpoint['model']['Box_Outs.cls_score.bias']
del checkpoint['model']['Box_Outs.bbox_pred.weight']
del checkpoint['model']['Box_Outs.bbox_pred.bias']
net_utils_rel.load_ckpt_rel(self.Prd_RCNN, checkpoint['model'])
if cfg.TRAIN.FREEZE_PRD_CONV_BODY:
for p in self.Prd_RCNN.Conv_Body.parameters():
p.requires_grad = False
if cfg.TRAIN.FREEZE_PRD_BOX_HEAD:
for p in self.Prd_RCNN.Box_Head.parameters():
p.requires_grad = False
if cfg.RESNETS.TO_BE_FINETUNED_WEIGHTS != '' or cfg.VGG16.TO_BE_FINETUNED_WEIGHTS != '':
if cfg.RESNETS.TO_BE_FINETUNED_WEIGHTS != '':
logger.info("loading trained and to be finetuned weights from %s", cfg.RESNETS.TO_BE_FINETUNED_WEIGHTS)
checkpoint = torch.load(cfg.RESNETS.TO_BE_FINETUNED_WEIGHTS, map_location=lambda storage, loc: storage)
if cfg.VGG16.TO_BE_FINETUNED_WEIGHTS != '':
logger.info("loading trained and to be finetuned weights from %s", cfg.VGG16.TO_BE_FINETUNED_WEIGHTS)
checkpoint = torch.load(cfg.VGG16.TO_BE_FINETUNED_WEIGHTS, map_location=lambda storage, loc: storage)
net_utils_rel.load_ckpt_rel(self, checkpoint['model'])
for p in self.Conv_Body.parameters():
p.requires_grad = False
for p in self.RPN.parameters():
p.requires_grad = False
if not cfg.MODEL.UNFREEZE_DET:
for p in self.Box_Head.parameters():
p.requires_grad = False
for p in self.Box_Outs.parameters():
p.requires_grad = False
if cfg.RESNETS.REL_PRETRAINED_WEIGHTS != '':
logger.info("loading rel pretrained weights from %s", cfg.RESNETS.REL_PRETRAINED_WEIGHTS)
checkpoint = torch.load(cfg.RESNETS.REL_PRETRAINED_WEIGHTS, map_location=lambda storage, loc: storage)
prd_rcnn_state_dict = {}
reldn_state_dict = {}
for name in checkpoint['model']:
if name.find('Prd_RCNN') >= 0:
prd_rcnn_state_dict[name] = checkpoint['model'][name]
if name.find('RelDN') >= 0:
reldn_state_dict[name] = checkpoint['model'][name]
net_utils_rel.load_ckpt_rel(self.Prd_RCNN, prd_rcnn_state_dict)
if cfg.TRAIN.FREEZE_PRD_CONV_BODY:
for p in self.Prd_RCNN.Conv_Body.parameters():
p.requires_grad = False
if cfg.TRAIN.FREEZE_PRD_BOX_HEAD:
for p in self.Prd_RCNN.Box_Head.parameters():
p.requires_grad = False
del reldn_state_dict['RelDN.prd_cls_scores.weight']
del reldn_state_dict['RelDN.prd_cls_scores.bias']
if 'RelDN.prd_sbj_scores.weight' in reldn_state_dict:
del reldn_state_dict['RelDN.prd_sbj_scores.weight']
if 'RelDN.prd_sbj_scores.bias' in reldn_state_dict:
del reldn_state_dict['RelDN.prd_sbj_scores.bias']
if 'RelDN.prd_obj_scores.weight' in reldn_state_dict:
del reldn_state_dict['RelDN.prd_obj_scores.weight']
if 'RelDN.prd_obj_scores.bias' in reldn_state_dict:
del reldn_state_dict['RelDN.prd_obj_scores.bias']
if 'RelDN.spt_cls_scores.weight' in reldn_state_dict:
del reldn_state_dict['RelDN.spt_cls_scores.weight']
if 'RelDN.spt_cls_scores.bias' in reldn_state_dict:
del reldn_state_dict['RelDN.spt_cls_scores.bias']
net_utils_rel.load_ckpt_rel(self.RelDN, reldn_state_dict)
def load_detector_weights(self, weight_name):
logger.info("loading pretrained weights from %s", weight_name)
checkpoint = torch.load(weight_name, map_location=lambda storage, loc: storage)
net_utils_rel.load_ckpt_rel(self, checkpoint['model'])
# freeze everything above the rel module
for p in self.Conv_Body.parameters():
p.requires_grad = False
for p in self.RPN.parameters():
p.requires_grad = False
if not cfg.MODEL.UNFREEZE_DET:
for p in self.Box_Head.parameters():
p.requires_grad = False
for p in self.Box_Outs.parameters():
p.requires_grad = False
def forward(self, data, im_info, do_vis=False, dataset_name=None, roidb=None, use_gt_labels=False, **rpn_kwargs):
if cfg.PYTORCH_VERSION_LESS_THAN_040:
return self._forward(data, im_info, do_vis, dataset_name, roidb, use_gt_labels, **rpn_kwargs)
else:
with torch.set_grad_enabled(self.training):
return self._forward(data, im_info, do_vis, dataset_name, roidb, use_gt_labels, **rpn_kwargs)
def _forward(self, data, im_info, do_vis=False, dataset_name=None, roidb=None, use_gt_labels=False, **rpn_kwargs):
im_data = data
if self.training:
roidb = list(map(lambda x: blob_utils.deserialize(x)[0], roidb))
if dataset_name is not None:
dataset_name = blob_utils.deserialize(dataset_name)
else:
dataset_name = cfg.TRAIN.DATASETS[0] if self.training else cfg.TEST.DATASETS[0] # assuming only one dataset per run
device_id = im_data.get_device()
return_dict = {} # A dict to collect return variables
blob_conv = self.Conv_Body(im_data)
if not cfg.MODEL.USE_REL_PYRAMID:
blob_conv_prd = self.Prd_RCNN.Conv_Body(im_data)
rpn_ret = self.RPN(blob_conv, im_info, roidb)
if cfg.FPN.FPN_ON:
# Retain only the blobs that will be used for RoI heads. `blob_conv` may include
# extra blobs that are used for RPN proposals, but not for RoI heads.
blob_conv = blob_conv[-self.num_roi_levels:]
if not cfg.MODEL.USE_REL_PYRAMID:
blob_conv_prd = blob_conv_prd[-self.num_roi_levels:]
else:
blob_conv_prd = self.RelPyramid(blob_conv)
if cfg.MODEL.SHARE_RES5 and self.training:
box_feat, res5_feat = self.Box_Head(blob_conv, rpn_ret, use_relu=True)
else:
box_feat = self.Box_Head(blob_conv, rpn_ret, use_relu=True)
cls_score, bbox_pred = self.Box_Outs(box_feat)
# now go through the predicate branch
use_relu = False if cfg.MODEL.NO_FC7_RELU else True
if self.training:
fg_inds = np.where(rpn_ret['labels_int32'] > 0)[0]
det_rois = rpn_ret['rois'][fg_inds]
det_labels = rpn_ret['labels_int32'][fg_inds]
det_scores = F.softmax(cls_score[fg_inds], dim=1)
rel_ret = self.RelPN(det_rois, det_labels, det_scores, im_info, dataset_name, roidb)
if cfg.MODEL.ADD_SO_SCORES:
sbj_feat = self.S_Head(blob_conv, rel_ret, rois_name='sbj_rois', use_relu=use_relu)
obj_feat = self.O_Head(blob_conv, rel_ret, rois_name='obj_rois', use_relu=use_relu)
else:
sbj_feat = self.Box_Head(blob_conv, rel_ret, rois_name='sbj_rois', use_relu=use_relu)
obj_feat = self.Box_Head(blob_conv, rel_ret, rois_name='obj_rois', use_relu=use_relu)
if cfg.MODEL.USE_NODE_CONTRASTIVE_LOSS or cfg.MODEL.USE_NODE_CONTRASTIVE_SO_AWARE_LOSS or cfg.MODEL.USE_NODE_CONTRASTIVE_P_AWARE_LOSS:
if cfg.MODEL.ADD_SO_SCORES:
# sbj
sbj_feat_sbj_pos = self.S_Head(blob_conv, rel_ret, rois_name='sbj_rois_sbj_pos', use_relu=use_relu)
obj_feat_sbj_pos = self.O_Head(blob_conv, rel_ret, rois_name='obj_rois_sbj_pos', use_relu=use_relu)
# obj
sbj_feat_obj_pos = self.S_Head(blob_conv, rel_ret, rois_name='sbj_rois_obj_pos', use_relu=use_relu)
obj_feat_obj_pos = self.O_Head(blob_conv, rel_ret, rois_name='obj_rois_obj_pos', use_relu=use_relu)
else:
# sbj
sbj_feat_sbj_pos = self.Box_Head(blob_conv, rel_ret, rois_name='sbj_rois_sbj_pos', use_relu=use_relu)
obj_feat_sbj_pos = self.Box_Head(blob_conv, rel_ret, rois_name='obj_rois_sbj_pos', use_relu=use_relu)
# obj
sbj_feat_obj_pos = self.Box_Head(blob_conv, rel_ret, rois_name='sbj_rois_obj_pos', use_relu=use_relu)
obj_feat_obj_pos = self.Box_Head(blob_conv, rel_ret, rois_name='obj_rois_obj_pos', use_relu=use_relu)
else:
if roidb is not None:
im_scale = im_info.data.numpy()[:, 2][0]
im_w = im_info.data.numpy()[:, 1][0]
im_h = im_info.data.numpy()[:, 0][0]
sbj_boxes = roidb['sbj_gt_boxes']
obj_boxes = roidb['obj_gt_boxes']
sbj_rois = sbj_boxes * im_scale
obj_rois = obj_boxes * im_scale
repeated_batch_idx = 0 * blob_utils.ones((sbj_rois.shape[0], 1))
sbj_rois = np.hstack((repeated_batch_idx, sbj_rois))
obj_rois = np.hstack((repeated_batch_idx, obj_rois))
rel_rois = box_utils_rel.rois_union(sbj_rois, obj_rois)
rel_ret = {}
rel_ret['sbj_rois'] = sbj_rois
rel_ret['obj_rois'] = obj_rois
rel_ret['rel_rois'] = rel_rois
if cfg.FPN.FPN_ON and cfg.FPN.MULTILEVEL_ROIS:
lvl_min = cfg.FPN.ROI_MIN_LEVEL
lvl_max = cfg.FPN.ROI_MAX_LEVEL
rois_blob_names = ['sbj_rois', 'obj_rois', 'rel_rois']
for rois_blob_name in rois_blob_names:
# Add per FPN level roi blobs named like: <rois_blob_name>_fpn<lvl>
target_lvls = fpn_utils.map_rois_to_fpn_levels(
rel_ret[rois_blob_name][:, 1:5], lvl_min, lvl_max)
fpn_utils.add_multilevel_roi_blobs(
rel_ret, rois_blob_name, rel_ret[rois_blob_name], target_lvls,
lvl_min, lvl_max)
sbj_det_feat = self.Box_Head(blob_conv, rel_ret, rois_name='sbj_rois', use_relu=True)
sbj_cls_scores, _ = self.Box_Outs(sbj_det_feat)
sbj_cls_scores = sbj_cls_scores.data.cpu().numpy()
obj_det_feat = self.Box_Head(blob_conv, rel_ret, rois_name='obj_rois', use_relu=True)
obj_cls_scores, _ = self.Box_Outs(obj_det_feat)
obj_cls_scores = obj_cls_scores.data.cpu().numpy()
if use_gt_labels:
sbj_labels = roidb['sbj_gt_classes'] # start from 0
obj_labels = roidb['obj_gt_classes'] # start from 0
sbj_scores = np.ones_like(sbj_labels, dtype=np.float32)
obj_scores = np.ones_like(obj_labels, dtype=np.float32)
else:
sbj_labels = np.argmax(sbj_cls_scores[:, 1:], axis=1)
obj_labels = np.argmax(obj_cls_scores[:, 1:], axis=1)
sbj_scores = np.amax(sbj_cls_scores[:, 1:], axis=1)
obj_scores = np.amax(obj_cls_scores[:, 1:], axis=1)
rel_ret['sbj_scores'] = sbj_scores.astype(np.float32, copy=False)
rel_ret['obj_scores'] = obj_scores.astype(np.float32, copy=False)
rel_ret['sbj_labels'] = sbj_labels.astype(np.int32, copy=False) + 1 # need to start from 1
rel_ret['obj_labels'] = obj_labels.astype(np.int32, copy=False) + 1 # need to start from 1
rel_ret['all_sbj_labels_int32'] = sbj_labels.astype(np.int32, copy=False)
rel_ret['all_obj_labels_int32'] = obj_labels.astype(np.int32, copy=False)
if cfg.MODEL.USE_SPATIAL_FEAT:
spt_feat = box_utils_rel.get_spt_features(sbj_boxes, obj_boxes, im_w, im_h)
rel_ret['spt_feat'] = spt_feat
if cfg.MODEL.ADD_SO_SCORES:
sbj_feat = self.S_Head(blob_conv, rel_ret, rois_name='sbj_rois', use_relu=use_relu)
obj_feat = self.O_Head(blob_conv, rel_ret, rois_name='obj_rois', use_relu=use_relu)
else:
sbj_feat = self.Box_Head(blob_conv, rel_ret, rois_name='sbj_rois', use_relu=use_relu)
obj_feat = self.Box_Head(blob_conv, rel_ret, rois_name='obj_rois', use_relu=use_relu)
else:
score_thresh = cfg.TEST.SCORE_THRESH
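                # keep lowering the detection score threshold until at least one subject-object
                # pair survives, so every test image produces some rel_rois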
while score_thresh >= -1e-06: # a negative value very close to 0.0
det_rois, det_labels, det_scores = \
self.prepare_det_rois(rpn_ret['rois'], cls_score, bbox_pred, im_info, score_thresh)
rel_ret = self.RelPN(det_rois, det_labels, det_scores, im_info, dataset_name, roidb)
valid_len = len(rel_ret['rel_rois'])
if valid_len > 0:
break
logger.info('Got {} rel_rois when score_thresh={}, changing to {}'.format(
valid_len, score_thresh, score_thresh - 0.01))
score_thresh -= 0.01
if cfg.MODEL.ADD_SO_SCORES:
det_s_feat = self.S_Head(blob_conv, rel_ret, rois_name='det_rois', use_relu=use_relu)
det_o_feat = self.O_Head(blob_conv, rel_ret, rois_name='det_rois', use_relu=use_relu)
sbj_feat = det_s_feat[rel_ret['sbj_inds']]
obj_feat = det_o_feat[rel_ret['obj_inds']]
else:
det_feat = self.Box_Head(blob_conv, rel_ret, rois_name='det_rois', use_relu=use_relu)
sbj_feat = det_feat[rel_ret['sbj_inds']]
obj_feat = det_feat[rel_ret['obj_inds']]
rel_feat = self.Prd_RCNN.Box_Head(blob_conv_prd, rel_ret, rois_name='rel_rois', use_relu=use_relu)
spo_feat = torch.cat((sbj_feat, rel_feat, obj_feat), dim=1)
if cfg.MODEL.USE_SPATIAL_FEAT:
spt_feat = rel_ret['spt_feat']
else:
spt_feat = None
if cfg.MODEL.USE_FREQ_BIAS or cfg.MODEL.RUN_BASELINE:
sbj_labels = rel_ret['all_sbj_labels_int32']
obj_labels = rel_ret['all_obj_labels_int32']
else:
sbj_labels = None
obj_labels = None
# prd_scores is the visual scores. See reldn_heads.py
prd_scores, prd_bias_scores, prd_spt_scores, ttl_cls_scores, sbj_cls_scores, obj_cls_scores = \
self.RelDN(spo_feat, spt_feat, sbj_labels, obj_labels, sbj_feat, obj_feat)
if self.training:
return_dict['losses'] = {}
return_dict['metrics'] = {}
# rpn loss
rpn_kwargs.update(dict(
(k, rpn_ret[k]) for k in rpn_ret.keys()
if (k.startswith('rpn_cls_logits') or k.startswith('rpn_bbox_pred'))
))
loss_rpn_cls, loss_rpn_bbox = rpn_heads.generic_rpn_losses(**rpn_kwargs)
if cfg.FPN.FPN_ON:
for i, lvl in enumerate(range(cfg.FPN.RPN_MIN_LEVEL, cfg.FPN.RPN_MAX_LEVEL + 1)):
return_dict['losses']['loss_rpn_cls_fpn%d' % lvl] = loss_rpn_cls[i]
return_dict['losses']['loss_rpn_bbox_fpn%d' % lvl] = loss_rpn_bbox[i]
else:
return_dict['losses']['loss_rpn_cls'] = loss_rpn_cls
return_dict['losses']['loss_rpn_bbox'] = loss_rpn_bbox
# bbox loss
loss_cls, loss_bbox, accuracy_cls = fast_rcnn_heads.fast_rcnn_losses(
cls_score, bbox_pred, rpn_ret['labels_int32'], rpn_ret['bbox_targets'],
rpn_ret['bbox_inside_weights'], rpn_ret['bbox_outside_weights'])
return_dict['losses']['loss_cls'] = loss_cls
return_dict['losses']['loss_bbox'] = loss_bbox
return_dict['metrics']['accuracy_cls'] = accuracy_cls
if cfg.MODEL.USE_FREQ_BIAS and not cfg.MODEL.ADD_SCORES_ALL:
loss_cls_bias, accuracy_cls_bias = reldn_heads.reldn_losses(
prd_bias_scores, rel_ret['all_prd_labels_int32'])
return_dict['losses']['loss_cls_bias'] = loss_cls_bias
return_dict['metrics']['accuracy_cls_bias'] = accuracy_cls_bias
if cfg.MODEL.USE_SPATIAL_FEAT and not cfg.MODEL.ADD_SCORES_ALL:
loss_cls_spt, accuracy_cls_spt = reldn_heads.reldn_losses(
prd_spt_scores, rel_ret['all_prd_labels_int32'])
return_dict['losses']['loss_cls_spt'] = loss_cls_spt
return_dict['metrics']['accuracy_cls_spt'] = accuracy_cls_spt
if cfg.MODEL.ADD_SCORES_ALL:
loss_cls_ttl, accuracy_cls_ttl = reldn_heads.reldn_losses(
ttl_cls_scores, rel_ret['all_prd_labels_int32'])
return_dict['losses']['loss_cls_ttl'] = loss_cls_ttl
return_dict['metrics']['accuracy_cls_ttl'] = accuracy_cls_ttl
else:
loss_cls_prd, accuracy_cls_prd = reldn_heads.reldn_losses(
prd_scores, rel_ret['all_prd_labels_int32'])
return_dict['losses']['loss_cls_prd'] = loss_cls_prd
return_dict['metrics']['accuracy_cls_prd'] = accuracy_cls_prd
if cfg.MODEL.USE_NODE_CONTRASTIVE_LOSS or cfg.MODEL.USE_NODE_CONTRASTIVE_SO_AWARE_LOSS or cfg.MODEL.USE_NODE_CONTRASTIVE_P_AWARE_LOSS:
# sbj
rel_feat_sbj_pos = self.Prd_RCNN.Box_Head(blob_conv_prd, rel_ret, rois_name='rel_rois_sbj_pos', use_relu=use_relu)
spo_feat_sbj_pos = torch.cat((sbj_feat_sbj_pos, rel_feat_sbj_pos, obj_feat_sbj_pos), dim=1)
if cfg.MODEL.USE_SPATIAL_FEAT:
spt_feat_sbj_pos = rel_ret['spt_feat_sbj_pos']
else:
spt_feat_sbj_pos = None
if cfg.MODEL.USE_FREQ_BIAS or cfg.MODEL.RUN_BASELINE:
sbj_labels_sbj_pos_fg = rel_ret['sbj_labels_sbj_pos_fg_int32']
obj_labels_sbj_pos_fg = rel_ret['obj_labels_sbj_pos_fg_int32']
else:
sbj_labels_sbj_pos_fg = None
obj_labels_sbj_pos_fg = None
_, prd_bias_scores_sbj_pos, _, ttl_cls_scores_sbj_pos, _, _ = \
self.RelDN(spo_feat_sbj_pos, spt_feat_sbj_pos, sbj_labels_sbj_pos_fg, obj_labels_sbj_pos_fg, sbj_feat_sbj_pos, obj_feat_sbj_pos)
# obj
rel_feat_obj_pos = self.Prd_RCNN.Box_Head(blob_conv_prd, rel_ret, rois_name='rel_rois_obj_pos', use_relu=use_relu)
spo_feat_obj_pos = torch.cat((sbj_feat_obj_pos, rel_feat_obj_pos, obj_feat_obj_pos), dim=1)
if cfg.MODEL.USE_SPATIAL_FEAT:
spt_feat_obj_pos = rel_ret['spt_feat_obj_pos']
else:
spt_feat_obj_pos = None
if cfg.MODEL.USE_FREQ_BIAS or cfg.MODEL.RUN_BASELINE:
sbj_labels_obj_pos_fg = rel_ret['sbj_labels_obj_pos_fg_int32']
obj_labels_obj_pos_fg = rel_ret['obj_labels_obj_pos_fg_int32']
else:
sbj_labels_obj_pos_fg = None
obj_labels_obj_pos_fg = None
_, prd_bias_scores_obj_pos, _, ttl_cls_scores_obj_pos, _, _ = \
self.RelDN(spo_feat_obj_pos, spt_feat_obj_pos, sbj_labels_obj_pos_fg, obj_labels_obj_pos_fg, sbj_feat_obj_pos, obj_feat_obj_pos)
if cfg.MODEL.USE_NODE_CONTRASTIVE_LOSS:
loss_contrastive_sbj, loss_contrastive_obj = reldn_heads.reldn_contrastive_losses(
ttl_cls_scores_sbj_pos, ttl_cls_scores_obj_pos, rel_ret)
return_dict['losses']['loss_contrastive_sbj'] = loss_contrastive_sbj * cfg.MODEL.NODE_CONTRASTIVE_WEIGHT
return_dict['losses']['loss_contrastive_obj'] = loss_contrastive_obj * cfg.MODEL.NODE_CONTRASTIVE_WEIGHT
if cfg.MODEL.USE_NODE_CONTRASTIVE_SO_AWARE_LOSS:
loss_so_contrastive_sbj, loss_so_contrastive_obj = reldn_heads.reldn_so_contrastive_losses(
ttl_cls_scores_sbj_pos, ttl_cls_scores_obj_pos, rel_ret)
return_dict['losses']['loss_so_contrastive_sbj'] = loss_so_contrastive_sbj * cfg.MODEL.NODE_CONTRASTIVE_SO_AWARE_WEIGHT
return_dict['losses']['loss_so_contrastive_obj'] = loss_so_contrastive_obj * cfg.MODEL.NODE_CONTRASTIVE_SO_AWARE_WEIGHT
if cfg.MODEL.USE_NODE_CONTRASTIVE_P_AWARE_LOSS:
loss_p_contrastive_sbj, loss_p_contrastive_obj = reldn_heads.reldn_p_contrastive_losses(
ttl_cls_scores_sbj_pos, ttl_cls_scores_obj_pos, prd_bias_scores_sbj_pos, prd_bias_scores_obj_pos, rel_ret)
return_dict['losses']['loss_p_contrastive_sbj'] = loss_p_contrastive_sbj * cfg.MODEL.NODE_CONTRASTIVE_P_AWARE_WEIGHT
return_dict['losses']['loss_p_contrastive_obj'] = loss_p_contrastive_obj * cfg.MODEL.NODE_CONTRASTIVE_P_AWARE_WEIGHT
# pytorch0.4 bug on gathering scalar(0-dim) tensors
for k, v in return_dict['losses'].items():
return_dict['losses'][k] = v.unsqueeze(0)
for k, v in return_dict['metrics'].items():
return_dict['metrics'][k] = v.unsqueeze(0)
else:
# Testing
return_dict['sbj_rois'] = rel_ret['sbj_rois']
return_dict['obj_rois'] = rel_ret['obj_rois']
return_dict['sbj_labels'] = rel_ret['sbj_labels']
return_dict['obj_labels'] = rel_ret['obj_labels']
return_dict['sbj_scores'] = rel_ret['sbj_scores']
return_dict['obj_scores'] = rel_ret['obj_scores']
return_dict['prd_scores'] = prd_scores
if cfg.MODEL.USE_FREQ_BIAS:
return_dict['prd_scores_bias'] = prd_bias_scores
if cfg.MODEL.USE_SPATIAL_FEAT:
return_dict['prd_scores_spt'] = prd_spt_scores
if cfg.MODEL.ADD_SCORES_ALL:
return_dict['prd_ttl_scores'] = ttl_cls_scores
if do_vis:
return_dict['blob_conv'] = blob_conv
return_dict['blob_conv_prd'] = blob_conv_prd
return return_dict
def get_roi_inds(self, det_labels, lbls):
lbl_set = np.array(lbls)
inds = np.where(np.isin(det_labels, lbl_set))[0]
return inds
def prepare_det_rois(self, rois, cls_scores, bbox_pred, im_info, score_thresh=cfg.TEST.SCORE_THRESH):
im_info = im_info.data.cpu().numpy()
# NOTE: 'rois' is numpy array while
# 'cls_scores' and 'bbox_pred' are pytorch tensors
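        # For each image in the batch: decode box deltas into boxes, run per-class NMS with the
        # given score threshold, then scale the kept boxes back to the network input resolution
        # and prepend the batch index so they can be used as det_rois.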
scores = cls_scores.data.cpu().numpy().squeeze()
# Apply bounding-box regression deltas
box_deltas = bbox_pred.data.cpu().numpy().squeeze()
assert rois.shape[0] == scores.shape[0] == box_deltas.shape[0]
det_rois = np.empty((0, 5), dtype=np.float32)
det_labels = np.empty((0), dtype=np.float32)
det_scores = np.empty((0), dtype=np.float32)
for im_i in range(cfg.TRAIN.IMS_PER_BATCH):
# get all boxes that belong to this image
inds = np.where(abs(rois[:, 0] - im_i) < 1e-06)[0]
# unscale back to raw image space
im_boxes = rois[inds, 1:5] / im_info[im_i, 2]
im_scores = scores[inds]
# In case there is 1 proposal
im_scores = im_scores.reshape([-1, im_scores.shape[-1]])
# In case there is 1 proposal
im_box_deltas = box_deltas[inds]
            im_box_deltas = im_box_deltas.reshape([-1, im_box_deltas.shape[-1]])
im_scores, im_boxes = self.get_det_boxes(im_boxes, im_scores, im_box_deltas, im_info[im_i][:2] / im_info[im_i][2])
im_scores, im_boxes, im_labels = self.box_results_with_nms_and_limit(im_scores, im_boxes, score_thresh)
batch_inds = im_i * np.ones(
(im_boxes.shape[0], 1), dtype=np.float32)
im_det_rois = np.hstack((batch_inds, im_boxes * im_info[im_i, 2]))
det_rois = np.append(det_rois, im_det_rois, axis=0)
det_labels = np.append(det_labels, im_labels, axis=0)
det_scores = np.append(det_scores, im_scores, axis=0)
return det_rois, det_labels, det_scores
def get_det_boxes(self, boxes, scores, box_deltas, h_and_w):
if cfg.TEST.BBOX_REG:
if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG:
# Remove predictions for bg class (compat with MSRA code)
box_deltas = box_deltas[:, -4:]
if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
# (legacy) Optionally normalize targets by a precomputed mean and stdev
                box_deltas = box_deltas.reshape(-1, 4) * cfg.TRAIN.BBOX_NORMALIZE_STDS \
                             + cfg.TRAIN.BBOX_NORMALIZE_MEANS
pred_boxes = box_utils.bbox_transform(boxes, box_deltas, cfg.MODEL.BBOX_REG_WEIGHTS)
pred_boxes = box_utils.clip_tiled_boxes(pred_boxes, h_and_w)
if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG:
pred_boxes = np.tile(pred_boxes, (1, scores.shape[1]))
else:
# Simply repeat the boxes, once for each class
pred_boxes = np.tile(boxes, (1, scores.shape[1]))
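        # Legacy Fast R-CNN path: `inv_index` would come from an upstream box de-duplication step
        # that this project does not perform, so this branch is only reachable (and would fail)
        # if cfg.MODEL.FASTER_RCNN were disabled together with DEDUP_BOXES > 0.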
if cfg.DEDUP_BOXES > 0 and not cfg.MODEL.FASTER_RCNN:
# Map scores and predictions back to the original set of boxes
scores = scores[inv_index, :]
pred_boxes = pred_boxes[inv_index, :]
return scores, pred_boxes
def box_results_with_nms_and_limit(self, scores, boxes, score_thresh=cfg.TEST.SCORE_THRESH):
num_classes = cfg.MODEL.NUM_CLASSES
cls_boxes = [[] for _ in range(num_classes)]
# Apply threshold on detection probabilities and apply NMS
# Skip j = 0, because it's the background class
for j in range(1, num_classes):
inds = np.where(scores[:, j] > score_thresh)[0]
scores_j = scores[inds, j]
boxes_j = boxes[inds, j * 4:(j + 1) * 4]
dets_j = np.hstack((boxes_j, scores_j[:, np.newaxis])).astype(np.float32, copy=False)
if cfg.TEST.SOFT_NMS.ENABLED:
nms_dets, _ = box_utils.soft_nms(
dets_j,
sigma=cfg.TEST.SOFT_NMS.SIGMA,
overlap_thresh=cfg.TEST.NMS,
score_thresh=0.0001,
method=cfg.TEST.SOFT_NMS.METHOD
)
else:
keep = box_utils.nms(dets_j, cfg.TEST.NMS)
nms_dets = dets_j[keep, :]
# add labels
label_j = np.ones((nms_dets.shape[0], 1), dtype=np.float32) * j
nms_dets = np.hstack((nms_dets, label_j))
# Refine the post-NMS boxes using bounding-box voting
if cfg.TEST.BBOX_VOTE.ENABLED:
nms_dets = box_utils.box_voting(
nms_dets,
dets_j,
cfg.TEST.BBOX_VOTE.VOTE_TH,
scoring_method=cfg.TEST.BBOX_VOTE.SCORING_METHOD
)
cls_boxes[j] = nms_dets
# Limit to max_per_image detections **over all classes**
if cfg.TEST.DETECTIONS_PER_IM > 0:
image_scores = np.hstack(
[cls_boxes[j][:, -2] for j in range(1, num_classes)]
)
if len(image_scores) > cfg.TEST.DETECTIONS_PER_IM:
image_thresh = np.sort(image_scores)[-cfg.TEST.DETECTIONS_PER_IM]
for j in range(1, num_classes):
keep = np.where(cls_boxes[j][:, -2] >= image_thresh)[0]
cls_boxes[j] = cls_boxes[j][keep, :]
im_results = np.vstack([cls_boxes[j] for j in range(1, num_classes)])
boxes = im_results[:, :-2]
scores = im_results[:, -2]
labels = im_results[:, -1]
return scores, boxes, labels
def roi_feature_transform(self, blobs_in, rpn_ret, blob_rois='rois', method='RoIPoolF',
resolution=7, spatial_scale=1. / 16., sampling_ratio=0):
"""Add the specified RoI pooling method. The sampling_ratio argument
is supported for some, but not all, RoI transform methods.
RoIFeatureTransform abstracts away:
- Use of FPN or not
- Specifics of the transform method
"""
assert method in {'RoIPoolF', 'RoICrop', 'RoIAlign'}, \
'Unknown pooling method: {}'.format(method)
if isinstance(blobs_in, list):
# FPN case: add RoIFeatureTransform to each FPN level
device_id = blobs_in[0].get_device()
k_max = cfg.FPN.ROI_MAX_LEVEL # coarsest level of pyramid
k_min = cfg.FPN.ROI_MIN_LEVEL # finest level of pyramid
assert len(blobs_in) == k_max - k_min + 1
bl_out_list = []
for lvl in range(k_min, k_max + 1):
bl_in = blobs_in[k_max - lvl] # blobs_in is in reversed order
sc = spatial_scale[k_max - lvl] # in reversed order
bl_rois = blob_rois + '_fpn' + str(lvl)
if len(rpn_ret[bl_rois]):
rois = Variable(torch.from_numpy(rpn_ret[bl_rois])).cuda(device_id)
if method == 'RoIPoolF':
                        # Warning: not verified that this implementation matches Detectron
xform_out = RoIPoolFunction(resolution, resolution, sc)(bl_in, rois)
elif method == 'RoICrop':
                        # Warning: not verified that this implementation matches Detectron
grid_xy = net_utils.affine_grid_gen(
rois, bl_in.size()[2:], self.grid_size)
grid_yx = torch.stack(
[grid_xy.data[:, :, :, 1], grid_xy.data[:, :, :, 0]], 3).contiguous()
xform_out = RoICropFunction()(bl_in, Variable(grid_yx).detach())
if cfg.CROP_RESIZE_WITH_MAX_POOL:
xform_out = F.max_pool2d(xform_out, 2, 2)
elif method == 'RoIAlign':
xform_out = RoIAlignFunction(
resolution, resolution, sc, sampling_ratio)(bl_in, rois)
bl_out_list.append(xform_out)
# The pooled features from all levels are concatenated along the
# batch dimension into a single 4D tensor.
xform_shuffled = torch.cat(bl_out_list, dim=0)
# Unshuffle to match rois from dataloader
device_id = xform_shuffled.get_device()
restore_bl = rpn_ret[blob_rois + '_idx_restore_int32']
restore_bl = Variable(
torch.from_numpy(restore_bl.astype('int64', copy=False))).cuda(device_id)
xform_out = xform_shuffled[restore_bl]
else:
# Single feature level
# rois: holds R regions of interest, each is a 5-tuple
# (batch_idx, x1, y1, x2, y2) specifying an image batch index and a
# rectangle (x1, y1, x2, y2)
device_id = blobs_in.get_device()
rois = Variable(torch.from_numpy(rpn_ret[blob_rois])).cuda(device_id)
if method == 'RoIPoolF':
xform_out = RoIPoolFunction(resolution, resolution, spatial_scale)(blobs_in, rois)
elif method == 'RoICrop':
grid_xy = net_utils.affine_grid_gen(rois, blobs_in.size()[2:], self.grid_size)
grid_yx = torch.stack(
[grid_xy.data[:, :, :, 1], grid_xy.data[:, :, :, 0]], 3).contiguous()
xform_out = RoICropFunction()(blobs_in, Variable(grid_yx).detach())
if cfg.CROP_RESIZE_WITH_MAX_POOL:
xform_out = F.max_pool2d(xform_out, 2, 2)
elif method == 'RoIAlign':
xform_out = RoIAlignFunction(
resolution, resolution, spatial_scale, sampling_ratio)(blobs_in, rois)
return xform_out
@check_inference
def convbody_net(self, data):
"""For inference. Run Conv Body only"""
blob_conv = self.Conv_Body(data)
if cfg.FPN.FPN_ON:
# Retain only the blobs that will be used for RoI heads. `blob_conv` may include
# extra blobs that are used for RPN proposals, but not for RoI heads.
blob_conv = blob_conv[-self.num_roi_levels:]
return blob_conv
@property
def detectron_weight_mapping(self):
if self.mapping_to_detectron is None:
d_wmap = {} # detectron_weight_mapping
d_orphan = [] # detectron orphan weight list
for name, m_child in self.named_children():
if list(m_child.parameters()): # if module has any parameter
child_map, child_orphan = m_child.detectron_weight_mapping()
d_orphan.extend(child_orphan)
for key, value in child_map.items():
new_key = name + '.' + key
d_wmap[new_key] = value
self.mapping_to_detectron = d_wmap
self.orphans_in_detectron = d_orphan
return self.mapping_to_detectron, self.orphans_in_detectron
def _add_loss(self, return_dict, key, value):
"""Add loss tensor to returned dictionary"""
return_dict['losses'][key] = value
| ContrastiveLosses4VRD-master | lib/modeling_rel/model_builder_rel.py |
# Some functions are adapted from Rowan Zellers:
# https://github.com/rowanz/neural-motifs
# Get counts of all of the examples in the dataset. Used for creating the baseline
# dictionary model
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import json
import utils.boxes as box_utils
import utils_rel.boxes_rel as box_utils_rel
from core.config import cfg
from datasets_rel.dataset_catalog_rel import ANN_FN2
from datasets_rel.dataset_catalog_rel import DATASETS
# This function is adapted from Rowan Zellers:
# https://github.com/rowanz/neural-motifs/blob/master/lib/get_dataset_counts.py
# Modified for this project
def get_rel_counts(ds_name, must_overlap=True):
"""
Get counts of all of the relations. Used for modeling directly P(rel | o1, o2)
:param train_data:
:param must_overlap:
:return:
"""
if ds_name.find('vg') >= 0:
with open(DATASETS['vg_train'][ANN_FN2]) as f:
train_data = json.load(f)
elif ds_name.find('oi') >= 0:
with open(DATASETS['oi_rel_train'][ANN_FN2]) as f:
train_data = json.load(f)
elif ds_name.find('vrd') >= 0:
with open(DATASETS['vrd_train'][ANN_FN2]) as f:
train_data = json.load(f)
else:
raise NotImplementedError
fg_matrix = np.zeros((
cfg.MODEL.NUM_CLASSES - 1, # not include background
cfg.MODEL.NUM_CLASSES - 1, # not include background
cfg.MODEL.NUM_PRD_CLASSES + 1, # include background
), dtype=np.int64)
bg_matrix = np.zeros((
cfg.MODEL.NUM_CLASSES - 1, # not include background
cfg.MODEL.NUM_CLASSES - 1, # not include background
), dtype=np.int64)
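    # fg_matrix[s, o, p] counts annotated (subject class, object class, predicate) triples;
    # bg_matrix[s, o] counts co-occurring subject/object box pairs (restricted to overlapping
    # pairs when the overlap filter is used) as the background/no-relation statistics.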
for _, im_rels in train_data.items():
# get all object boxes
gt_box_to_label = {}
for i, rel in enumerate(im_rels):
sbj_box = box_utils_rel.y1y2x1x2_to_x1y1x2y2(rel['subject']['bbox'])
obj_box = box_utils_rel.y1y2x1x2_to_x1y1x2y2(rel['object']['bbox'])
sbj_lbl = rel['subject']['category'] # not include background
obj_lbl = rel['object']['category'] # not include background
prd_lbl = rel['predicate'] # not include background
if tuple(sbj_box) not in gt_box_to_label:
gt_box_to_label[tuple(sbj_box)] = sbj_lbl
if tuple(obj_box) not in gt_box_to_label:
gt_box_to_label[tuple(obj_box)] = obj_lbl
fg_matrix[sbj_lbl, obj_lbl, prd_lbl + 1] += 1
if cfg.MODEL.USE_OVLP_FILTER:
if len(gt_box_to_label):
gt_boxes = np.array(list(gt_box_to_label.keys()), dtype=np.int32)
gt_classes = np.array(list(gt_box_to_label.values()), dtype=np.int32)
o1o2_total = gt_classes[np.array(
box_filter(gt_boxes, must_overlap=must_overlap), dtype=int)]
for (o1, o2) in o1o2_total:
bg_matrix[o1, o2] += 1
else:
# consider all pairs of boxes, overlapped or non-overlapped
for b1, l1 in gt_box_to_label.items():
for b2, l2 in gt_box_to_label.items():
if b1 == b2:
continue
bg_matrix[l1, l2] += 1
return fg_matrix, bg_matrix
# This function is adapted from Rowan Zellers:
# https://github.com/rowanz/neural-motifs/blob/master/lib/get_dataset_counts.py
# Modified for this project
def box_filter(boxes, must_overlap=False):
""" Only include boxes that overlap as possible relations.
If no overlapping boxes, use all of them."""
n_cands = boxes.shape[0]
overlaps = box_utils.bbox_overlaps(boxes.astype(np.float32), boxes.astype(np.float32)) > 0
np.fill_diagonal(overlaps, 0)
    all_possib = np.ones_like(overlaps, dtype=bool)  # plain bool: np.bool is removed in recent NumPy
np.fill_diagonal(all_possib, 0)
if must_overlap:
possible_boxes = np.column_stack(np.where(overlaps))
if possible_boxes.size == 0:
possible_boxes = np.column_stack(np.where(all_possib))
else:
possible_boxes = np.column_stack(np.where(all_possib))
return possible_boxes
| ContrastiveLosses4VRD-master | lib/modeling_rel/get_dataset_counts_rel.py |
# Adapted by Ji Zhang in 2019
#
# Based on Detectron.pytorch/lib/roi_data/minibatch.py written by Roy Tseng
import numpy as np
import cv2
from core.config import cfg
import utils.blob as blob_utils
import roi_data.fast_rcnn  # needed for the precomputed-proposal (non-RPN) branch below
import roi_data.rpn
def get_minibatch_blob_names(is_training=True):
"""Return blob names in the order in which they are read by the data loader.
"""
# data blob: holds a batch of N images, each with 3 channels
blob_names = ['data']
if cfg.RPN.RPN_ON:
# RPN-only or end-to-end Faster R-CNN
blob_names += roi_data.rpn.get_rpn_blob_names(is_training=is_training)
elif cfg.RETINANET.RETINANET_ON:
raise NotImplementedError
else:
# Fast R-CNN like models trained on precomputed proposals
blob_names += roi_data.fast_rcnn.get_fast_rcnn_blob_names(
is_training=is_training
)
return blob_names
def get_minibatch(roidb):
"""Given a roidb, construct a minibatch sampled from it."""
# We collect blobs from each image onto a list and then concat them into a
# single tensor, hence we initialize each blob to an empty list
blobs = {k: [] for k in get_minibatch_blob_names()}
# Get the input image blob
im_blob, im_scales = _get_image_blob(roidb)
blobs['data'] = im_blob
if cfg.RPN.RPN_ON:
# RPN-only or end-to-end Faster/Mask R-CNN
valid = roi_data.rpn.add_rpn_blobs(blobs, im_scales, roidb)
elif cfg.RETINANET.RETINANET_ON:
raise NotImplementedError
else:
# Fast R-CNN like models trained on precomputed proposals
valid = roi_data.fast_rcnn.add_fast_rcnn_blobs(blobs, im_scales, roidb)
# add relpn blobs
add_relpn_blobs(blobs, im_scales, roidb)
return blobs, valid
def add_relpn_blobs(blobs, im_scales, roidb):
assert 'roidb' in blobs
valid_keys = ['dataset_name',
'sbj_gt_boxes', 'sbj_gt_classes', 'obj_gt_boxes', 'obj_gt_classes', 'prd_gt_classes',
'sbj_gt_overlaps', 'obj_gt_overlaps', 'prd_gt_overlaps', 'pair_to_gt_ind_map',
'width', 'height']
for i, e in enumerate(roidb):
for k in valid_keys:
if k in e:
blobs['roidb'][i][k] = e[k]
# Always return valid=True, since RPN minibatches are valid by design
return True
def _get_image_blob(roidb):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
# Sample random scales to use for each image in this batch
scale_inds = np.random.randint(
0, high=len(cfg.TRAIN.SCALES), size=num_images)
processed_ims = []
im_scales = []
for i in range(num_images):
im = cv2.imread(roidb[i]['image'])
assert im is not None, \
'Failed to read image \'{}\''.format(roidb[i]['image'])
# If NOT using opencv to read in images, uncomment following lines
# if len(im.shape) == 2:
# im = im[:, :, np.newaxis]
# im = np.concatenate((im, im, im), axis=2)
# # flip the channel, since the original one using cv2
# # rgb -> bgr
# im = im[:, :, ::-1]
if roidb[i]['flipped']:
im = im[:, ::-1, :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
im, im_scale = blob_utils.prep_im_for_blob(
im, cfg.PIXEL_MEANS, [target_size], cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale[0])
processed_ims.append(im[0])
# Create a blob to hold the input images [n, c, h, w]
blob = blob_utils.im_list_to_blob(processed_ims)
return blob, im_scales
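# Illustrative addition (not part of the original file): prep_im_for_blob(),
# imported from utils.blob above, picks its scale with the standard Detectron
# rule sketched below (shorter side -> target_size, capped so the longer side
# never exceeds max_size). This is a simplified approximation for reference,
# not the repo's actual implementation.
def _approx_im_scale(im_shape, target_size, max_size):
    im_size_min = float(min(im_shape[0], im_shape[1]))
    im_size_max = float(max(im_shape[0], im_shape[1]))
    im_scale = target_size / im_size_min
    if np.round(im_scale * im_size_max) > max_size:
        im_scale = max_size / im_size_max
    return im_scale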
| ContrastiveLosses4VRD-master | lib/roi_data_rel/minibatch_rel.py |
# Adapted by Ji Zhang, 2019
#
# Based on Detectron.pytorch/lib/roi_data/fast_rcnn.py
# Original license text:
# --------------------------------------------------------
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Construct minibatches for Fast R-CNN training. Handles the minibatch blobs
that are specific to Fast R-CNN. Other blobs that are generic to RPN, etc.
are handled by their respective roi_data modules.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import numpy.random as npr
import logging
from core.config import cfg
import utils_rel.boxes_rel as box_utils_rel
import utils.blob as blob_utils
import utils.fpn as fpn_utils
logger = logging.getLogger(__name__)
def add_rel_blobs(blobs, im_scales, roidb):
"""Add blobs needed for training Fast R-CNN style models."""
# Sample training RoIs from each image and append them to the blob lists
for im_i, entry in enumerate(roidb):
frcn_blobs = _sample_pairs(entry, im_scales[im_i], im_i)
for k, v in frcn_blobs.items():
blobs[k].append(v)
# Concat the training blob lists into tensors
for k, v in blobs.items():
if isinstance(v, list) and len(v) > 0:
blobs[k] = np.concatenate(v)
if cfg.FPN.FPN_ON and cfg.FPN.MULTILEVEL_ROIS:
_add_rel_multilevel_rois(blobs)
return True
def _sample_pairs(roidb, im_scale, batch_idx):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
fg_pairs_per_image = cfg.TRAIN.FG_REL_SIZE_PER_IM
pairs_per_image = int(cfg.TRAIN.FG_REL_SIZE_PER_IM / cfg.TRAIN.FG_REL_FRACTION) # need much more pairs since it's quadratic
max_pair_overlaps = roidb['max_pair_overlaps']
gt_pair_inds = np.where(max_pair_overlaps > 1.0 - 1e-4)[0]
fg_pair_inds = np.where((max_pair_overlaps >= cfg.TRAIN.FG_THRESH) &
(max_pair_overlaps <= 1.0 - 1e-4))[0]
fg_pairs_per_this_image = np.minimum(fg_pairs_per_image, gt_pair_inds.size + fg_pair_inds.size)
# Sample foreground regions without replacement
if fg_pair_inds.size > 0:
fg_pair_inds = npr.choice(
fg_pair_inds, size=(fg_pairs_per_this_image - gt_pair_inds.size), replace=False)
fg_pair_inds = np.append(fg_pair_inds, gt_pair_inds)
# Label is the class each RoI has max overlap with
fg_prd_labels = roidb['max_prd_classes'][fg_pair_inds]
blob_dict = dict(
fg_prd_labels_int32=fg_prd_labels.astype(np.int32, copy=False))
if cfg.MODEL.USE_BG:
bg_pair_inds = np.where((max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))[0]
# Compute number of background RoIs to take from this image (guarding
# against there being fewer than desired)
bg_pairs_per_this_image = pairs_per_image - fg_pairs_per_this_image
bg_pairs_per_this_image = np.minimum(bg_pairs_per_this_image, bg_pair_inds.size)
        # Sample background regions without replacement
if bg_pair_inds.size > 0:
bg_pair_inds = npr.choice(
bg_pair_inds, size=bg_pairs_per_this_image, replace=False)
keep_pair_inds = np.append(fg_pair_inds, bg_pair_inds)
all_prd_labels = np.zeros(keep_pair_inds.size, dtype=np.int32)
all_prd_labels[:fg_pair_inds.size] = fg_prd_labels + 1 # class should start from 1
else:
keep_pair_inds = fg_pair_inds
all_prd_labels = fg_prd_labels
blob_dict['all_prd_labels_int32'] = all_prd_labels.astype(np.int32, copy=False)
blob_dict['fg_size'] = np.array([fg_pair_inds.size], dtype=np.int32) # this is used to check if there is at least one fg to learn
sampled_sbj_boxes = roidb['sbj_boxes'][keep_pair_inds]
sampled_obj_boxes = roidb['obj_boxes'][keep_pair_inds]
# Scale rois and format as (batch_idx, x1, y1, x2, y2)
sampled_sbj_rois = sampled_sbj_boxes * im_scale
sampled_obj_rois = sampled_obj_boxes * im_scale
repeated_batch_idx = batch_idx * blob_utils.ones((keep_pair_inds.shape[0], 1))
sampled_sbj_rois = np.hstack((repeated_batch_idx, sampled_sbj_rois))
sampled_obj_rois = np.hstack((repeated_batch_idx, sampled_obj_rois))
blob_dict['sbj_rois'] = sampled_sbj_rois
blob_dict['obj_rois'] = sampled_obj_rois
sampled_rel_rois = box_utils_rel.rois_union(sampled_sbj_rois, sampled_obj_rois)
blob_dict['rel_rois'] = sampled_rel_rois
if cfg.MODEL.USE_SPATIAL_FEAT:
sampled_spt_feat = box_utils_rel.get_spt_features(
sampled_sbj_boxes, sampled_obj_boxes, roidb['width'], roidb['height'])
blob_dict['spt_feat'] = sampled_spt_feat
if cfg.MODEL.USE_FREQ_BIAS:
sbj_labels = roidb['max_sbj_classes'][keep_pair_inds]
obj_labels = roidb['max_obj_classes'][keep_pair_inds]
blob_dict['all_sbj_labels_int32'] = sbj_labels.astype(np.int32, copy=False)
blob_dict['all_obj_labels_int32'] = obj_labels.astype(np.int32, copy=False)
if cfg.MODEL.USE_NODE_CONTRASTIVE_LOSS or cfg.MODEL.USE_NODE_CONTRASTIVE_SO_AWARE_LOSS or cfg.MODEL.USE_NODE_CONTRASTIVE_P_AWARE_LOSS:
nodes_per_image = cfg.MODEL.NODE_SAMPLE_SIZE
max_sbj_overlaps = roidb['max_sbj_overlaps']
max_obj_overlaps = roidb['max_obj_overlaps']
# sbj
        # A natural assumption here: each positive sbj has at least one positive obj
sbj_pos_pair_pos_inds = np.where((max_pair_overlaps >= cfg.TRAIN.FG_THRESH))[0]
sbj_pos_obj_pos_pair_neg_inds = np.where((max_sbj_overlaps >= cfg.TRAIN.FG_THRESH) &
(max_obj_overlaps >= cfg.TRAIN.FG_THRESH) &
(max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))[0]
sbj_pos_obj_neg_pair_neg_inds = np.where((max_sbj_overlaps >= cfg.TRAIN.FG_THRESH) &
(max_obj_overlaps < cfg.TRAIN.FG_THRESH) &
(max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))[0]
if sbj_pos_pair_pos_inds.size > 0:
sbj_pos_pair_pos_inds = npr.choice(
sbj_pos_pair_pos_inds,
size=int(min(nodes_per_image, sbj_pos_pair_pos_inds.size)),
replace=False)
if sbj_pos_obj_pos_pair_neg_inds.size > 0:
sbj_pos_obj_pos_pair_neg_inds = npr.choice(
sbj_pos_obj_pos_pair_neg_inds,
size=int(min(nodes_per_image, sbj_pos_obj_pos_pair_neg_inds.size)),
replace=False)
sbj_pos_pair_neg_inds = sbj_pos_obj_pos_pair_neg_inds
if nodes_per_image - sbj_pos_obj_pos_pair_neg_inds.size > 0 and sbj_pos_obj_neg_pair_neg_inds.size > 0:
sbj_pos_obj_neg_pair_neg_inds = npr.choice(
sbj_pos_obj_neg_pair_neg_inds,
size=int(min(nodes_per_image - sbj_pos_obj_pos_pair_neg_inds.size, sbj_pos_obj_neg_pair_neg_inds.size)),
replace=False)
sbj_pos_pair_neg_inds = np.append(sbj_pos_pair_neg_inds, sbj_pos_obj_neg_pair_neg_inds)
sbj_pos_inds = np.append(sbj_pos_pair_pos_inds, sbj_pos_pair_neg_inds)
binary_labels_sbj_pos = np.zeros(sbj_pos_inds.size, dtype=np.int32)
binary_labels_sbj_pos[:sbj_pos_pair_pos_inds.size] = 1
blob_dict['binary_labels_sbj_pos_int32'] = binary_labels_sbj_pos.astype(np.int32, copy=False)
prd_pos_labels_sbj_pos = roidb['max_prd_classes'][sbj_pos_pair_pos_inds]
prd_labels_sbj_pos = np.zeros(sbj_pos_inds.size, dtype=np.int32)
prd_labels_sbj_pos[:sbj_pos_pair_pos_inds.size] = prd_pos_labels_sbj_pos + 1
blob_dict['prd_labels_sbj_pos_int32'] = prd_labels_sbj_pos.astype(np.int32, copy=False)
sbj_labels_sbj_pos = roidb['max_sbj_classes'][sbj_pos_inds] + 1
# 1. set all obj labels > 0
obj_labels_sbj_pos = roidb['max_obj_classes'][sbj_pos_inds] + 1
# 2. find those negative obj
max_obj_overlaps_sbj_pos = roidb['max_obj_overlaps'][sbj_pos_inds]
obj_neg_inds_sbj_pos = np.where(max_obj_overlaps_sbj_pos < cfg.TRAIN.FG_THRESH)[0]
obj_labels_sbj_pos[obj_neg_inds_sbj_pos] = 0
blob_dict['sbj_labels_sbj_pos_int32'] = sbj_labels_sbj_pos.astype(np.int32, copy=False)
blob_dict['obj_labels_sbj_pos_int32'] = obj_labels_sbj_pos.astype(np.int32, copy=False)
# this is for freq bias in RelDN
blob_dict['sbj_labels_sbj_pos_fg_int32'] = roidb['max_sbj_classes'][sbj_pos_inds].astype(np.int32, copy=False)
blob_dict['obj_labels_sbj_pos_fg_int32'] = roidb['max_obj_classes'][sbj_pos_inds].astype(np.int32, copy=False)
sampled_sbj_boxes_sbj_pos = roidb['sbj_boxes'][sbj_pos_inds]
sampled_obj_boxes_sbj_pos = roidb['obj_boxes'][sbj_pos_inds]
# Scale rois and format as (batch_idx, x1, y1, x2, y2)
sampled_sbj_rois_sbj_pos = sampled_sbj_boxes_sbj_pos * im_scale
sampled_obj_rois_sbj_pos = sampled_obj_boxes_sbj_pos * im_scale
repeated_batch_idx = batch_idx * blob_utils.ones((sbj_pos_inds.shape[0], 1))
sampled_sbj_rois_sbj_pos = np.hstack((repeated_batch_idx, sampled_sbj_rois_sbj_pos))
sampled_obj_rois_sbj_pos = np.hstack((repeated_batch_idx, sampled_obj_rois_sbj_pos))
blob_dict['sbj_rois_sbj_pos'] = sampled_sbj_rois_sbj_pos
blob_dict['obj_rois_sbj_pos'] = sampled_obj_rois_sbj_pos
sampled_rel_rois_sbj_pos = box_utils_rel.rois_union(sampled_sbj_rois_sbj_pos, sampled_obj_rois_sbj_pos)
blob_dict['rel_rois_sbj_pos'] = sampled_rel_rois_sbj_pos
_, inds_unique_sbj_pos, inds_reverse_sbj_pos = np.unique(
sampled_sbj_rois_sbj_pos, return_index=True, return_inverse=True, axis=0)
assert inds_reverse_sbj_pos.shape[0] == sampled_sbj_rois_sbj_pos.shape[0]
blob_dict['inds_unique_sbj_pos'] = inds_unique_sbj_pos
blob_dict['inds_reverse_sbj_pos'] = inds_reverse_sbj_pos
if cfg.MODEL.USE_SPATIAL_FEAT:
sampled_spt_feat_sbj_pos = box_utils_rel.get_spt_features(
sampled_sbj_boxes_sbj_pos, sampled_obj_boxes_sbj_pos, roidb['width'], roidb['height'])
blob_dict['spt_feat_sbj_pos'] = sampled_spt_feat_sbj_pos
# obj
        # A natural assumption here: each positive obj has at least one positive sbj
obj_pos_pair_pos_inds = np.where((max_pair_overlaps >= cfg.TRAIN.FG_THRESH))[0]
obj_pos_sbj_pos_pair_neg_inds = np.where((max_obj_overlaps >= cfg.TRAIN.FG_THRESH) &
(max_sbj_overlaps >= cfg.TRAIN.FG_THRESH) &
(max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))[0]
obj_pos_sbj_neg_pair_neg_inds = np.where((max_obj_overlaps >= cfg.TRAIN.FG_THRESH) &
(max_sbj_overlaps < cfg.TRAIN.FG_THRESH) &
(max_pair_overlaps < cfg.TRAIN.BG_THRESH_HI))[0]
if obj_pos_pair_pos_inds.size > 0:
obj_pos_pair_pos_inds = npr.choice(
obj_pos_pair_pos_inds,
size=int(min(nodes_per_image, obj_pos_pair_pos_inds.size)),
replace=False)
if obj_pos_sbj_pos_pair_neg_inds.size > 0:
obj_pos_sbj_pos_pair_neg_inds = npr.choice(
obj_pos_sbj_pos_pair_neg_inds,
size=int(min(nodes_per_image, obj_pos_sbj_pos_pair_neg_inds.size)),
replace=False)
obj_pos_pair_neg_inds = obj_pos_sbj_pos_pair_neg_inds
if nodes_per_image - obj_pos_sbj_pos_pair_neg_inds.size > 0 and obj_pos_sbj_neg_pair_neg_inds.size:
obj_pos_sbj_neg_pair_neg_inds = npr.choice(
obj_pos_sbj_neg_pair_neg_inds,
size=int(min(nodes_per_image - obj_pos_sbj_pos_pair_neg_inds.size, obj_pos_sbj_neg_pair_neg_inds.size)),
replace=False)
obj_pos_pair_neg_inds = np.append(obj_pos_pair_neg_inds, obj_pos_sbj_neg_pair_neg_inds)
obj_pos_inds = np.append(obj_pos_pair_pos_inds, obj_pos_pair_neg_inds)
binary_labels_obj_pos = np.zeros(obj_pos_inds.size, dtype=np.int32)
binary_labels_obj_pos[:obj_pos_pair_pos_inds.size] = 1
blob_dict['binary_labels_obj_pos_int32'] = binary_labels_obj_pos.astype(np.int32, copy=False)
prd_pos_labels_obj_pos = roidb['max_prd_classes'][obj_pos_pair_pos_inds]
prd_labels_obj_pos = np.zeros(obj_pos_inds.size, dtype=np.int32)
prd_labels_obj_pos[:obj_pos_pair_pos_inds.size] = prd_pos_labels_obj_pos + 1
blob_dict['prd_labels_obj_pos_int32'] = prd_labels_obj_pos.astype(np.int32, copy=False)
obj_labels_obj_pos = roidb['max_obj_classes'][obj_pos_inds] + 1
# 1. set all sbj labels > 0
sbj_labels_obj_pos = roidb['max_sbj_classes'][obj_pos_inds] + 1
# 2. find those negative sbj
max_sbj_overlaps_obj_pos = roidb['max_sbj_overlaps'][obj_pos_inds]
sbj_neg_inds_obj_pos = np.where(max_sbj_overlaps_obj_pos < cfg.TRAIN.FG_THRESH)[0]
sbj_labels_obj_pos[sbj_neg_inds_obj_pos] = 0
blob_dict['sbj_labels_obj_pos_int32'] = sbj_labels_obj_pos.astype(np.int32, copy=False)
blob_dict['obj_labels_obj_pos_int32'] = obj_labels_obj_pos.astype(np.int32, copy=False)
# this is for freq bias in RelDN
blob_dict['sbj_labels_obj_pos_fg_int32'] = roidb['max_sbj_classes'][obj_pos_inds].astype(np.int32, copy=False)
blob_dict['obj_labels_obj_pos_fg_int32'] = roidb['max_obj_classes'][obj_pos_inds].astype(np.int32, copy=False)
sampled_sbj_boxes_obj_pos = roidb['sbj_boxes'][obj_pos_inds]
sampled_obj_boxes_obj_pos = roidb['obj_boxes'][obj_pos_inds]
# Scale rois and format as (batch_idx, x1, y1, x2, y2)
sampled_sbj_rois_obj_pos = sampled_sbj_boxes_obj_pos * im_scale
sampled_obj_rois_obj_pos = sampled_obj_boxes_obj_pos * im_scale
repeated_batch_idx = batch_idx * blob_utils.ones((obj_pos_inds.shape[0], 1))
sampled_sbj_rois_obj_pos = np.hstack((repeated_batch_idx, sampled_sbj_rois_obj_pos))
sampled_obj_rois_obj_pos = np.hstack((repeated_batch_idx, sampled_obj_rois_obj_pos))
blob_dict['sbj_rois_obj_pos'] = sampled_sbj_rois_obj_pos
blob_dict['obj_rois_obj_pos'] = sampled_obj_rois_obj_pos
sampled_rel_rois_obj_pos = box_utils_rel.rois_union(sampled_sbj_rois_obj_pos, sampled_obj_rois_obj_pos)
blob_dict['rel_rois_obj_pos'] = sampled_rel_rois_obj_pos
_, inds_unique_obj_pos, inds_reverse_obj_pos = np.unique(
sampled_obj_rois_obj_pos, return_index=True, return_inverse=True, axis=0)
assert inds_reverse_obj_pos.shape[0] == sampled_obj_rois_obj_pos.shape[0]
blob_dict['inds_unique_obj_pos'] = inds_unique_obj_pos
blob_dict['inds_reverse_obj_pos'] = inds_reverse_obj_pos
if cfg.MODEL.USE_SPATIAL_FEAT:
sampled_spt_feat_obj_pos = box_utils_rel.get_spt_features(
sampled_sbj_boxes_obj_pos, sampled_obj_boxes_obj_pos, roidb['width'], roidb['height'])
blob_dict['spt_feat_obj_pos'] = sampled_spt_feat_obj_pos
return blob_dict
def _add_rel_multilevel_rois(blobs):
"""By default training RoIs are added for a single feature map level only.
When using FPN, the RoIs must be distributed over different FPN levels
    according to the level assignment heuristic (see: modeling.FPN.
map_rois_to_fpn_levels).
"""
lvl_min = cfg.FPN.ROI_MIN_LEVEL
lvl_max = cfg.FPN.ROI_MAX_LEVEL
def _distribute_rois_over_fpn_levels(rois_blob_names):
"""Distribute rois over the different FPN levels."""
# Get target level for each roi
# Recall blob rois are in (batch_idx, x1, y1, x2, y2) format, hence take
# the box coordinates from columns 1:5
lowest_target_lvls = None
for rois_blob_name in rois_blob_names:
target_lvls = fpn_utils.map_rois_to_fpn_levels(
blobs[rois_blob_name][:, 1:5], lvl_min, lvl_max)
if lowest_target_lvls is None:
lowest_target_lvls = target_lvls
else:
lowest_target_lvls = np.minimum(lowest_target_lvls, target_lvls)
for rois_blob_name in rois_blob_names:
# Add per FPN level roi blobs named like: <rois_blob_name>_fpn<lvl>
fpn_utils.add_multilevel_roi_blobs(
blobs, rois_blob_name, blobs[rois_blob_name], lowest_target_lvls, lvl_min,
lvl_max)
_distribute_rois_over_fpn_levels(['sbj_rois'])
_distribute_rois_over_fpn_levels(['obj_rois'])
_distribute_rois_over_fpn_levels(['rel_rois'])
if cfg.MODEL.USE_NODE_CONTRASTIVE_LOSS or cfg.MODEL.USE_NODE_CONTRASTIVE_SO_AWARE_LOSS or cfg.MODEL.USE_NODE_CONTRASTIVE_P_AWARE_LOSS:
_distribute_rois_over_fpn_levels(['sbj_rois_sbj_pos'])
_distribute_rois_over_fpn_levels(['obj_rois_sbj_pos'])
_distribute_rois_over_fpn_levels(['rel_rois_sbj_pos'])
_distribute_rois_over_fpn_levels(['sbj_rois_obj_pos'])
_distribute_rois_over_fpn_levels(['obj_rois_obj_pos'])
_distribute_rois_over_fpn_levels(['rel_rois_obj_pos'])
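# Illustrative addition (not part of the original file): the level assignment
# heuristic referenced in _add_rel_multilevel_rois() above follows the FPN paper
# (Lin et al., 2017): k = floor(k0 + log2(sqrt(w * h) / s0)), clipped to
# [lvl_min, lvl_max]. The canonical level k0 and scale s0 below are the usual
# Detectron defaults and are assumptions here.
def _approx_map_rois_to_fpn_levels(boxes, k_min, k_max, k0=4, s0=224.):
    widths = boxes[:, 2] - boxes[:, 0] + 1
    heights = boxes[:, 3] - boxes[:, 1] + 1
    scales = np.sqrt(widths * heights)
    target_lvls = np.floor(k0 + np.log2(scales / s0 + 1e-6))
    return np.clip(target_lvls, k_min, k_max)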
| ContrastiveLosses4VRD-master | lib/roi_data_rel/fast_rcnn_rel.py |
ContrastiveLosses4VRD-master | lib/roi_data_rel/__init__.py |
|
# Adapted by Ji Zhang for this project in 2019
#
# Based on Detectron.Pytorch/lib/roi/loader.py by Roy Tseng
import math
import numpy as np
import numpy.random as npr
import torch
import torch.utils.data as data
import torch.utils.data.sampler as torch_sampler
from torch.utils.data.dataloader import default_collate
from torch._six import int_classes as _int_classes
from core.config import cfg
from roi_data_rel.minibatch_rel import get_minibatch
import utils.blob as blob_utils
class RoiDataLoader(data.Dataset):
def __init__(self, roidb, num_classes, training=True):
self._roidb = roidb
self._num_classes = num_classes
self.training = training
self.DATA_SIZE = len(self._roidb)
def __getitem__(self, index_tuple):
index, ratio = index_tuple
single_db = [self._roidb[index]]
blobs, valid = get_minibatch(single_db)
#TODO: Check if minibatch is valid ? If not, abandon it.
# Need to change _worker_loop in torch.utils.data.dataloader.py.
# Squeeze batch dim
for key in blobs:
if key != 'roidb':
blobs[key] = blobs[key].squeeze(axis=0)
if self._roidb[index]['need_crop']:
self.crop_data(blobs, ratio)
# Check bounding box
entry = blobs['roidb'][0]
boxes = entry['boxes']
invalid = (boxes[:, 0] == boxes[:, 2]) | (boxes[:, 1] == boxes[:, 3])
valid_inds = np.nonzero(~ invalid)[0]
if len(valid_inds) < len(boxes):
for key in ['boxes', 'gt_classes', 'seg_areas', 'gt_overlaps', 'is_crowd',
'box_to_gt_ind_map', 'gt_keypoints']:
if key in entry:
entry[key] = entry[key][valid_inds]
entry['segms'] = [entry['segms'][ind] for ind in valid_inds]
# for rel sanity check
sbj_gt_boxes = entry['sbj_gt_boxes']
obj_gt_boxes = entry['obj_gt_boxes']
sbj_invalid = (sbj_gt_boxes[:, 0] == sbj_gt_boxes[:, 2]) | (sbj_gt_boxes[:, 1] == sbj_gt_boxes[:, 3])
obj_invalid = (obj_gt_boxes[:, 0] == obj_gt_boxes[:, 2]) | (obj_gt_boxes[:, 1] == obj_gt_boxes[:, 3])
        rel_invalid = sbj_invalid | obj_invalid
rel_valid_inds = np.nonzero(~ rel_invalid)[0]
if len(rel_valid_inds) < len(sbj_gt_boxes):
for key in ['sbj_gt_boxes', 'sbj_gt_classes', 'obj_gt_boxes', 'obj_gt_classes', 'prd_gt_classes',
'sbj_gt_overlaps', 'obj_gt_overlaps', 'prd_gt_overlaps', 'pair_to_gt_ind_map',
'width', 'height']:
if key in entry:
entry[key] = entry[key][rel_valid_inds]
blobs['roidb'] = blob_utils.serialize(blobs['roidb']) # CHECK: maybe we can serialize in collate_fn
return blobs
def crop_data(self, blobs, ratio):
data_height, data_width = map(int, blobs['im_info'][:2])
boxes = blobs['roidb'][0]['boxes']
if ratio < 1: # width << height, crop height
size_crop = math.ceil(data_width / ratio) # size after crop
min_y = math.floor(np.min(boxes[:, 1]))
max_y = math.floor(np.max(boxes[:, 3]))
box_region = max_y - min_y + 1
if min_y == 0:
y_s = 0
else:
if (box_region - size_crop) < 0:
y_s_min = max(max_y - size_crop, 0)
y_s_max = min(min_y, data_height - size_crop)
y_s = y_s_min if y_s_min == y_s_max else \
npr.choice(range(y_s_min, y_s_max + 1))
else:
                    # CHECK: rethink the mechanism for the case box_region > size_crop.
                    # Currently the crop is biased towards the lower part of box_region
                    # because of the // 2 used for y_s_add.
y_s_add = (box_region - size_crop) // 2
y_s = min_y if y_s_add == 0 else \
npr.choice(range(min_y, min_y + y_s_add + 1))
# Crop the image
blobs['data'] = blobs['data'][:, y_s:(y_s + size_crop), :,]
# Update im_info
blobs['im_info'][0] = size_crop
# Shift and clamp boxes ground truth
boxes[:, 1] -= y_s
boxes[:, 3] -= y_s
np.clip(boxes[:, 1], 0, size_crop - 1, out=boxes[:, 1])
np.clip(boxes[:, 3], 0, size_crop - 1, out=boxes[:, 3])
blobs['roidb'][0]['boxes'] = boxes
else: # width >> height, crop width
size_crop = math.ceil(data_height * ratio)
min_x = math.floor(np.min(boxes[:, 0]))
max_x = math.floor(np.max(boxes[:, 2]))
box_region = max_x - min_x + 1
if min_x == 0:
x_s = 0
else:
if (box_region - size_crop) < 0:
x_s_min = max(max_x - size_crop, 0)
x_s_max = min(min_x, data_width - size_crop)
x_s = x_s_min if x_s_min == x_s_max else \
npr.choice(range(x_s_min, x_s_max + 1))
else:
x_s_add = (box_region - size_crop) // 2
x_s = min_x if x_s_add == 0 else \
npr.choice(range(min_x, min_x + x_s_add + 1))
# Crop the image
blobs['data'] = blobs['data'][:, :, x_s:(x_s + size_crop)]
# Update im_info
blobs['im_info'][1] = size_crop
# Shift and clamp boxes ground truth
boxes[:, 0] -= x_s
boxes[:, 2] -= x_s
np.clip(boxes[:, 0], 0, size_crop - 1, out=boxes[:, 0])
np.clip(boxes[:, 2], 0, size_crop - 1, out=boxes[:, 2])
blobs['roidb'][0]['boxes'] = boxes
def __len__(self):
return self.DATA_SIZE
def cal_minibatch_ratio(ratio_list):
"""Given the ratio_list, we want to make the RATIO same for each minibatch on each GPU.
    Note: this only works when 1) cfg.TRAIN.MAX_SIZE is ignored during `prep_im_for_blob`
    and 2) cfg.TRAIN.SCALES contains a SINGLE scale.
    Since all prepared images will then have the same min side length of cfg.TRAIN.SCALES[0], we can
    pad and batch images based on that.
"""
DATA_SIZE = len(ratio_list)
ratio_list_minibatch = np.empty((DATA_SIZE,))
num_minibatch = int(np.ceil(DATA_SIZE / cfg.TRAIN.IMS_PER_BATCH)) # Include leftovers
for i in range(num_minibatch):
left_idx = i * cfg.TRAIN.IMS_PER_BATCH
right_idx = min((i+1) * cfg.TRAIN.IMS_PER_BATCH - 1, DATA_SIZE - 1)
if ratio_list[right_idx] < 1:
# for ratio < 1, we preserve the leftmost in each batch.
target_ratio = ratio_list[left_idx]
elif ratio_list[left_idx] > 1:
# for ratio > 1, we preserve the rightmost in each batch.
target_ratio = ratio_list[right_idx]
else:
# for ratio cross 1, we make it to be 1.
target_ratio = 1
ratio_list_minibatch[left_idx:(right_idx+1)] = target_ratio
return ratio_list_minibatch
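# Illustrative worked example (not part of the original file), assuming
# cfg.TRAIN.IMS_PER_BATCH == 2 and a pre-sorted ratio_list (hypothetical numbers):
#   >>> cal_minibatch_ratio(np.array([0.5, 0.7, 1.2, 1.5]))
#   array([0.5, 0.5, 1.5, 1.5])
# Each on-GPU minibatch of 2 images shares one target ratio (leftmost for
# ratios < 1, rightmost for ratios > 1) so both images can be padded to a
# common shape.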
class MinibatchSampler(torch_sampler.Sampler):
def __init__(self, ratio_list, ratio_index):
self.ratio_list = ratio_list
self.ratio_index = ratio_index
self.num_data = len(ratio_list)
if cfg.TRAIN.ASPECT_GROUPING:
# Given the ratio_list, we want to make the ratio same
# for each minibatch on each GPU.
self.ratio_list_minibatch = cal_minibatch_ratio(ratio_list)
def __iter__(self):
if cfg.TRAIN.ASPECT_GROUPING:
            # indices for aspect-grouping-aware permutation
n, rem = divmod(self.num_data, cfg.TRAIN.IMS_PER_BATCH)
round_num_data = n * cfg.TRAIN.IMS_PER_BATCH
indices = np.arange(round_num_data)
npr.shuffle(indices.reshape(-1, cfg.TRAIN.IMS_PER_BATCH)) # inplace shuffle
if rem != 0:
indices = np.append(indices, np.arange(round_num_data, round_num_data + rem))
ratio_index = self.ratio_index[indices]
ratio_list_minibatch = self.ratio_list_minibatch[indices]
else:
rand_perm = npr.permutation(self.num_data)
ratio_list = self.ratio_list[rand_perm]
ratio_index = self.ratio_index[rand_perm]
# re-calculate minibatch ratio list
ratio_list_minibatch = cal_minibatch_ratio(ratio_list)
return iter(zip(ratio_index.tolist(), ratio_list_minibatch.tolist()))
def __len__(self):
return self.num_data
class BatchSampler(torch_sampler.BatchSampler):
r"""Wraps another sampler to yield a mini-batch of indices.
Args:
sampler (Sampler): Base sampler.
batch_size (int): Size of mini-batch.
drop_last (bool): If ``True``, the sampler will drop the last batch if
its size would be less than ``batch_size``
Example:
>>> list(BatchSampler(range(10), batch_size=3, drop_last=False))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
>>> list(BatchSampler(range(10), batch_size=3, drop_last=True))
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
"""
def __init__(self, sampler, batch_size, drop_last):
if not isinstance(sampler, torch_sampler.Sampler):
raise ValueError("sampler should be an instance of "
"torch.utils.data.Sampler, but got sampler={}"
.format(sampler))
if not isinstance(batch_size, _int_classes) or isinstance(batch_size, bool) or \
batch_size <= 0:
raise ValueError("batch_size should be a positive integeral value, "
"but got batch_size={}".format(batch_size))
if not isinstance(drop_last, bool):
raise ValueError("drop_last should be a boolean value, but got "
"drop_last={}".format(drop_last))
self.sampler = sampler
self.batch_size = batch_size
self.drop_last = drop_last
def __iter__(self):
batch = []
for idx in self.sampler:
batch.append(idx) # Difference: batch.append(int(idx))
if len(batch) == self.batch_size:
yield batch
batch = []
if len(batch) > 0 and not self.drop_last:
yield batch
def __len__(self):
if self.drop_last:
return len(self.sampler) // self.batch_size
else:
return (len(self.sampler) + self.batch_size - 1) // self.batch_size
def collate_minibatch(list_of_blobs):
"""Stack samples seperately and return a list of minibatches
A batch contains NUM_GPUS minibatches and image size in different minibatch may be different.
Hence, we need to stack smaples from each minibatch seperately.
"""
Batch = {key: [] for key in list_of_blobs[0]}
    # Because roidb consists of entries of variable length, it can't be batched into a tensor.
# So we keep roidb in the type of "list of ndarray".
list_of_roidb = [blobs.pop('roidb') for blobs in list_of_blobs]
for i in range(0, len(list_of_blobs), cfg.TRAIN.IMS_PER_BATCH):
mini_list = list_of_blobs[i:(i + cfg.TRAIN.IMS_PER_BATCH)]
# Pad image data
mini_list = pad_image_data(mini_list)
minibatch = default_collate(mini_list)
minibatch['roidb'] = list_of_roidb[i:(i + cfg.TRAIN.IMS_PER_BATCH)]
for key in minibatch:
Batch[key].append(minibatch[key])
return Batch
def pad_image_data(list_of_blobs):
max_shape = blob_utils.get_max_shape([blobs['data'].shape[1:] for blobs in list_of_blobs])
output_list = []
for blobs in list_of_blobs:
data_padded = np.zeros((3, max_shape[0], max_shape[1]), dtype=np.float32)
_, h, w = blobs['data'].shape
data_padded[:, :h, :w] = blobs['data']
blobs['data'] = data_padded
output_list.append(blobs)
return output_list
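# Illustrative addition (not part of the original file): a minimal sketch of how
# the pieces in this module are typically wired together by the training script.
# The argument names and the num_workers default are assumptions.
def build_rel_dataloader(roidb, ratio_list, ratio_index, num_classes, num_workers=4):
    dataset = RoiDataLoader(roidb, num_classes, training=True)
    sampler = MinibatchSampler(ratio_list, ratio_index)
    batch_sampler = BatchSampler(
        sampler,
        batch_size=cfg.NUM_GPUS * cfg.TRAIN.IMS_PER_BATCH,  # one minibatch per GPU
        drop_last=True)
    return data.DataLoader(
        dataset,
        batch_sampler=batch_sampler,
        num_workers=num_workers,
        collate_fn=collate_minibatch)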
| ContrastiveLosses4VRD-master | lib/roi_data_rel/loader_rel.py |
# Adapted by Ji Zhang in 2019
#
# Based on Detectron.pytorch/lib/utils/net.py written by Roy Tseng
import logging
import os
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from core.config import cfg
from utils.net import _get_lr_change_ratio
from utils.net import _CorrectMomentum
logger = logging.getLogger(__name__)
def update_learning_rate_att(optimizer, cur_lr, new_lr):
"""Update learning rate"""
if cur_lr != new_lr:
ratio = _get_lr_change_ratio(cur_lr, new_lr)
if ratio > cfg.SOLVER.LOG_LR_CHANGE_THRESHOLD:
logger.info('Changing learning rate %.6f -> %.6f', cur_lr, new_lr)
        # Update learning rate; note that different parameters may have different learning rates
param_keys = []
for ind, param_group in enumerate(optimizer.param_groups):
if (ind == 1 or ind == 3) and cfg.SOLVER.BIAS_DOUBLE_LR: # bias params
param_group['lr'] = new_lr * 2
else:
param_group['lr'] = new_lr
if ind <= 1: # backbone params
param_group['lr'] = cfg.SOLVER.BACKBONE_LR_SCALAR * param_group['lr'] # 0.1 * param_group['lr']
param_keys += param_group['params']
if cfg.SOLVER.TYPE in ['SGD'] and cfg.SOLVER.SCALE_MOMENTUM and cur_lr > 1e-7 and \
ratio > cfg.SOLVER.SCALE_MOMENTUM_THRESHOLD:
_CorrectMomentum(optimizer, param_keys, new_lr / cur_lr)
def update_learning_rate_rel(optimizer, cur_lr, new_lr):
"""Update learning rate"""
if cur_lr != new_lr:
ratio = _get_lr_change_ratio(cur_lr, new_lr)
if ratio > cfg.SOLVER.LOG_LR_CHANGE_THRESHOLD:
logger.info('Changing learning rate %.6f -> %.6f', cur_lr, new_lr)
        # Update learning rate; note that different parameters may have different learning rates
param_keys = []
for ind, param_group in enumerate(optimizer.param_groups):
if (ind == 1 or ind == 3) and cfg.SOLVER.BIAS_DOUBLE_LR: # bias params
param_group['lr'] = new_lr * 2
else:
param_group['lr'] = new_lr
if ind <= 1: # backbone params
param_group['lr'] = cfg.SOLVER.BACKBONE_LR_SCALAR * param_group['lr'] # 0.1 * param_group['lr']
param_keys += param_group['params']
if cfg.SOLVER.TYPE in ['SGD'] and cfg.SOLVER.SCALE_MOMENTUM and cur_lr > 1e-7 and \
ratio > cfg.SOLVER.SCALE_MOMENTUM_THRESHOLD:
_CorrectMomentum(optimizer, param_keys, new_lr / cur_lr)
def load_ckpt_rel(model, ckpt):
"""Load checkpoint"""
model.load_state_dict(ckpt, strict=False)
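# Illustrative addition (not part of the original file): the two update functions
# above index optimizer.param_groups positionally (0/1 = backbone weights/biases,
# 2/3 = remaining weights/biases). The sketch below shows one way such groups
# could be built; the 'Conv_Body' prefix and the exact split are assumptions
# made for clarity, not necessarily how the training script does it.
def build_param_groups_sketch(model, base_lr):
    groups = {'backbone_w': [], 'backbone_b': [], 'rest_w': [], 'rest_b': []}
    for name, p in model.named_parameters():
        if not p.requires_grad:
            continue
        prefix = 'backbone' if name.startswith('Conv_Body') else 'rest'
        suffix = 'b' if name.endswith('bias') else 'w'
        groups[prefix + '_' + suffix].append(p)
    bias_lr = base_lr * (2 if cfg.SOLVER.BIAS_DOUBLE_LR else 1)
    return [
        {'params': groups['backbone_w'], 'lr': cfg.SOLVER.BACKBONE_LR_SCALAR * base_lr},
        {'params': groups['backbone_b'], 'lr': cfg.SOLVER.BACKBONE_LR_SCALAR * bias_lr},
        {'params': groups['rest_w'], 'lr': base_lr},
        {'params': groups['rest_b'], 'lr': bias_lr},
    ]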
| ContrastiveLosses4VRD-master | lib/utils_rel/net_rel.py |
ContrastiveLosses4VRD-master | lib/utils_rel/__init__.py |
|
# Adapted by Ji Zhang in 2019 for this project
# Based on Detectron.pytorch/lib/utils/training_stats.py
# Original license text below:
#
##############################################################################
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Utilities for training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict, OrderedDict
import datetime
import numpy as np
from core.config import cfg
from utils_rel.logging_rel import log_stats
from utils_rel.logging_rel import SmoothedValue
from utils.timer import Timer
import utils.net as nu
class TrainingStats(object):
"""Track vital training statistics."""
def __init__(self, misc_args, log_period=20, tensorboard_logger=None):
# Output logging period in SGD iterations
self.misc_args = misc_args
self.LOG_PERIOD = log_period
self.tblogger = tensorboard_logger
self.tb_ignored_keys = ['iter', 'eta']
self.iter_timer = Timer()
# Window size for smoothing tracked values (with median filtering)
self.WIN_SZ = 20
def create_smoothed_value():
return SmoothedValue(self.WIN_SZ)
self.smoothed_losses = defaultdict(create_smoothed_value)
self.smoothed_metrics = defaultdict(create_smoothed_value)
self.smoothed_total_loss = SmoothedValue(self.WIN_SZ)
# For the support of args.iter_size
self.inner_total_loss = []
self.inner_losses = defaultdict(list)
if cfg.FPN.FPN_ON:
self.inner_loss_rpn_cls = []
self.inner_loss_rpn_bbox = []
self.inner_metrics = defaultdict(list)
def IterTic(self):
self.iter_timer.tic()
def IterToc(self):
return self.iter_timer.toc(average=False)
def ResetIterTimer(self):
self.iter_timer.reset()
def UpdateIterStats(self, model_out, inner_iter=None):
"""Update tracked iteration statistics."""
if inner_iter is not None and self.misc_args.iter_size > 1:
# For the case of using args.iter_size > 1
return self._UpdateIterStats_inner(model_out, inner_iter)
        # The following code is kept for compatibility with train_net.py and iter_size == 1
total_loss = 0
if cfg.FPN.FPN_ON:
loss_rpn_cls_data = 0
loss_rpn_bbox_data = 0
for k, loss in model_out['losses'].items():
assert loss.shape[0] == cfg.NUM_GPUS
loss = loss.mean(dim=0, keepdim=True)
total_loss += loss
loss_data = loss.data[0]
model_out['losses'][k] = loss
if cfg.FPN.FPN_ON:
if k.startswith('loss_rpn_cls_'):
loss_rpn_cls_data += loss_data
elif k.startswith('loss_rpn_bbox_'):
loss_rpn_bbox_data += loss_data
self.smoothed_losses[k].AddValue(loss_data)
model_out['total_loss'] = total_loss # Add the total loss for back propagation
self.smoothed_total_loss.AddValue(total_loss.data[0])
if cfg.FPN.FPN_ON:
self.smoothed_losses['loss_rpn_cls'].AddValue(loss_rpn_cls_data)
self.smoothed_losses['loss_rpn_bbox'].AddValue(loss_rpn_bbox_data)
for k, metric in model_out['metrics'].items():
metric = metric.mean(dim=0, keepdim=True)
self.smoothed_metrics[k].AddValue(metric.data[0])
def _UpdateIterStats_inner(self, model_out, inner_iter):
"""Update tracked iteration statistics for the case of iter_size > 1"""
assert inner_iter < self.misc_args.iter_size
total_loss = 0
if cfg.FPN.FPN_ON:
loss_rpn_cls_data = 0
loss_rpn_bbox_data = 0
if inner_iter == 0:
self.inner_total_loss = []
for k in model_out['losses']:
self.inner_losses[k] = []
if cfg.FPN.FPN_ON:
self.inner_loss_rpn_cls = []
self.inner_loss_rpn_bbox = []
for k in model_out['metrics']:
self.inner_metrics[k] = []
for k, loss in model_out['losses'].items():
assert loss.shape[0] == cfg.NUM_GPUS
loss = loss.mean(dim=0, keepdim=True)
total_loss += loss
loss_data = loss.data[0]
model_out['losses'][k] = loss
if cfg.FPN.FPN_ON:
if k.startswith('loss_rpn_cls_'):
loss_rpn_cls_data += loss_data
elif k.startswith('loss_rpn_bbox_'):
loss_rpn_bbox_data += loss_data
self.inner_losses[k].append(loss_data)
if inner_iter == (self.misc_args.iter_size - 1):
loss_data = self._mean_and_reset_inner_list('inner_losses', k)
self.smoothed_losses[k].AddValue(loss_data)
model_out['total_loss'] = total_loss # Add the total loss for back propagation
total_loss_data = total_loss.data[0]
self.inner_total_loss.append(total_loss_data)
if cfg.FPN.FPN_ON:
self.inner_loss_rpn_cls.append(loss_rpn_cls_data)
self.inner_loss_rpn_bbox.append(loss_rpn_bbox_data)
if inner_iter == (self.misc_args.iter_size - 1):
total_loss_data = self._mean_and_reset_inner_list('inner_total_loss')
self.smoothed_total_loss.AddValue(total_loss_data)
if cfg.FPN.FPN_ON:
loss_rpn_cls_data = self._mean_and_reset_inner_list('inner_loss_rpn_cls')
loss_rpn_bbox_data = self._mean_and_reset_inner_list('inner_loss_rpn_bbox')
self.smoothed_losses['loss_rpn_cls'].AddValue(loss_rpn_cls_data)
self.smoothed_losses['loss_rpn_bbox'].AddValue(loss_rpn_bbox_data)
for k, metric in model_out['metrics'].items():
metric = metric.mean(dim=0, keepdim=True)
metric_data = metric.data[0]
self.inner_metrics[k].append(metric_data)
if inner_iter == (self.misc_args.iter_size - 1):
metric_data = self._mean_and_reset_inner_list('inner_metrics', k)
self.smoothed_metrics[k].AddValue(metric_data)
def _mean_and_reset_inner_list(self, attr_name, key=None):
"""Take the mean and reset list empty"""
if key:
mean_val = sum(getattr(self, attr_name)[key]) / self.misc_args.iter_size
getattr(self, attr_name)[key] = []
else:
mean_val = sum(getattr(self, attr_name)) / self.misc_args.iter_size
setattr(self, attr_name, [])
return mean_val
def LogIterStats(self, cur_iter, lr, backbone_lr):
"""Log the tracked statistics."""
if (cur_iter % self.LOG_PERIOD == 0 or
cur_iter == cfg.SOLVER.MAX_ITER - 1):
stats = self.GetStats(cur_iter, lr, backbone_lr)
log_stats(stats, self.misc_args)
if self.tblogger:
self.tb_log_stats(stats, cur_iter)
def tb_log_stats(self, stats, cur_iter):
"""Log the tracked statistics to tensorboard"""
for k in stats:
if k not in self.tb_ignored_keys:
v = stats[k]
if isinstance(v, dict):
self.tb_log_stats(v, cur_iter)
else:
self.tblogger.add_scalar(k, v, cur_iter)
def GetStats(self, cur_iter, lr, backbone_lr):
eta_seconds = self.iter_timer.average_time * (
cfg.SOLVER.MAX_ITER - cur_iter
)
eta = str(datetime.timedelta(seconds=int(eta_seconds)))
stats = OrderedDict(
iter=cur_iter + 1, # 1-indexed
time=self.iter_timer.average_time,
eta=eta,
loss=self.smoothed_total_loss.GetMedianValue(),
lr=lr,
backbone_lr=backbone_lr
)
stats['metrics'] = OrderedDict()
for k in sorted(self.smoothed_metrics):
stats['metrics'][k] = self.smoothed_metrics[k].GetMedianValue()
head_losses = []
for k, v in self.smoothed_losses.items():
head_losses.append((k, v.GetMedianValue()))
stats['head_losses'] = OrderedDict(head_losses)
return stats
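# Illustrative addition (not part of the original file): a minimal sketch of the
# training loop TrainingStats is designed for; `args`, `dataloader`, `model`,
# `optimizer`, `lr` and `backbone_lr` are hypothetical names from the caller.
def _example_training_loop(args, dataloader, model, optimizer, lr, backbone_lr):
    training_stats = TrainingStats(args, log_period=20)
    dataiterator = iter(dataloader)
    for step in range(cfg.SOLVER.MAX_ITER):
        training_stats.IterTic()
        optimizer.zero_grad()
        for inner_iter in range(args.iter_size):
            input_data = next(dataiterator)
            net_outputs = model(**input_data)
            training_stats.UpdateIterStats(net_outputs, inner_iter)
            net_outputs['total_loss'].backward()
        optimizer.step()
        training_stats.IterToc()
        training_stats.LogIterStats(step, lr, backbone_lr)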
| ContrastiveLosses4VRD-master | lib/utils_rel/training_stats_rel.py |
# Adapted by Ji Zhang for this project in 2019
# Based on Detectron.pytorch/lib/utils/logging.py
# Original license text below:
#
############################################################################
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Utilities for logging."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import deque
from email.mime.text import MIMEText
import json
import logging
import numpy as np
import smtplib
import sys
from core.config import cfg
# Print lower precision floating point values than default FLOAT_REPR
# Note! Has no use for json encode with C speedups
json.encoder.FLOAT_REPR = lambda o: format(o, '.6f')
def log_json_stats(stats, sort_keys=True):
print('json_stats: {:s}'.format(json.dumps(stats, sort_keys=sort_keys)))
def log_stats(stats, misc_args):
"""Log training statistics to terminal"""
if hasattr(misc_args, 'epoch'):
lines = "[%s][%s][Epoch %d][Iter %d / %d]\n" % (
misc_args.run_name, misc_args.cfg_filename,
misc_args.epoch, misc_args.step, misc_args.iters_per_epoch)
else:
lines = "[%s][%s][Step %d / %d]\n" % (
misc_args.run_name, misc_args.cfg_filename, stats['iter'], cfg.SOLVER.MAX_ITER)
lines += "\t\tloss: %.6f, lr: %.6f backbone_lr: %.6f time: %.6f, eta: %s\n" % (
stats['loss'], stats['lr'], stats['backbone_lr'], stats['time'], stats['eta']
)
if stats['metrics']:
lines += "\t\t" + ", ".join("%s: %.6f" % (k, v) for k, v in stats['metrics'].items()) + "\n"
if stats['head_losses']:
lines += "\t\t" + ", ".join("%s: %.6f" % (k, v) for k, v in stats['head_losses'].items()) + "\n"
print(lines[:-1]) # remove last new line
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size):
self.deque = deque(maxlen=window_size)
self.series = []
self.total = 0.0
self.count = 0
def AddValue(self, value):
self.deque.append(value)
self.series.append(value)
self.count += 1
self.total += value
def GetMedianValue(self):
return np.median(self.deque)
def GetAverageValue(self):
return np.mean(self.deque)
def GetGlobalAverageValue(self):
return self.total / self.count
def send_email(subject, body, to):
s = smtplib.SMTP('localhost')
mime = MIMEText(body)
mime['Subject'] = subject
mime['To'] = to
s.sendmail('detectron', to, mime.as_string())
def setup_logging(name):
FORMAT = '%(levelname)s %(filename)s:%(lineno)4d: %(message)s'
# Manually clear root loggers to prevent any module that may have called
# logging.basicConfig() from blocking our logging setup
logging.root.handlers = []
logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout)
logger = logging.getLogger(name)
return logger
| ContrastiveLosses4VRD-master | lib/utils_rel/logging_rel.py |
# Adapted by Ji Zhang in 2019
# Based on Detectron.pytorch/lib/utils/subprocess.py
# Original license text below:
#
#############################################################################
#
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Primitives for running multiple single-GPU jobs in parallel over subranges of
data. These are used for running multi-GPU inference. Subprocesses are used to
avoid the GIL since inference may involve non-trivial amounts of Python code.
"""
from io import IOBase
import logging
import os
import subprocess
from six.moves import shlex_quote
from six.moves import cPickle as pickle
import yaml
import numpy as np
import torch
from core.config import cfg
logger = logging.getLogger(__name__)
def process_in_parallel(
tag, total_range_size, binary, output_dir,
load_ckpt, load_detectron, opts=''):
"""Run the specified binary NUM_GPUS times in parallel, each time as a
subprocess that uses one GPU. The binary must accept the command line
arguments `--range {start} {end}` that specify a data processing range.
"""
# Snapshot the current cfg state in order to pass to the inference
# subprocesses
cfg_file = os.path.join(output_dir, '{}_range_config.yaml'.format(tag))
with open(cfg_file, 'w') as f:
yaml.dump(cfg, stream=f)
subprocess_env = os.environ.copy()
processes = []
NUM_GPUS = torch.cuda.device_count()
subinds = np.array_split(range(total_range_size), NUM_GPUS)
# Determine GPUs to use
cuda_visible_devices = os.environ.get('CUDA_VISIBLE_DEVICES')
if cuda_visible_devices:
gpu_inds = list(map(int, cuda_visible_devices.split(',')))
assert -1 not in gpu_inds, \
'Hiding GPU indices using the \'-1\' index is not supported'
else:
gpu_inds = range(cfg.NUM_GPUS)
gpu_inds = list(gpu_inds)
# Run the binary in cfg.NUM_GPUS subprocesses
for i, gpu_ind in enumerate(gpu_inds):
start = subinds[i][0]
end = subinds[i][-1] + 1
subprocess_env['CUDA_VISIBLE_DEVICES'] = str(gpu_ind)
cmd = ('python3 {binary} --range {start} {end} --cfg {cfg_file} --set {opts} '
'--output_dir {output_dir}')
if load_ckpt is not None:
cmd += ' --load_ckpt {load_ckpt}'
elif load_detectron is not None:
cmd += ' --load_detectron {load_detectron}'
cmd = cmd.format(
binary=shlex_quote(binary),
start=int(start),
end=int(end),
cfg_file=shlex_quote(cfg_file),
output_dir=output_dir,
load_ckpt=load_ckpt,
load_detectron=load_detectron,
opts=' '.join([shlex_quote(opt) for opt in opts])
)
logger.info('{} range command {}: {}'.format(tag, i, cmd))
if i == 0:
subprocess_stdout = subprocess.PIPE
else:
filename = os.path.join(
output_dir, '%s_range_%s_%s.stdout' % (tag, start, end)
)
subprocess_stdout = open(filename, 'w')
p = subprocess.Popen(
cmd,
shell=True,
env=subprocess_env,
stdout=subprocess_stdout,
stderr=subprocess.STDOUT,
bufsize=1
)
processes.append((i, p, start, end, subprocess_stdout))
# Log output from inference processes and collate their results
outputs = []
for i, p, start, end, subprocess_stdout in processes:
log_subprocess_output(i, p, output_dir, tag, start, end)
if isinstance(subprocess_stdout, IOBase):
subprocess_stdout.close()
range_file = os.path.join(
output_dir, '%s_range_%s_%s.pkl' % (tag, start, end)
)
range_data = pickle.load(open(range_file, 'rb'))
outputs.append(range_data)
return outputs
def log_subprocess_output(i, p, output_dir, tag, start, end):
"""Capture the output of each subprocess and log it in the parent process.
The first subprocess's output is logged in realtime. The output from the
other subprocesses is buffered and then printed all at once (in order) when
subprocesses finish.
"""
outfile = os.path.join(
output_dir, '%s_range_%s_%s.stdout' % (tag, start, end)
)
logger.info('# ' + '-' * 76 + ' #')
logger.info(
'stdout of subprocess %s with range [%s, %s]' % (i, start + 1, end)
)
logger.info('# ' + '-' * 76 + ' #')
if i == 0:
# Stream the piped stdout from the first subprocess in realtime
with open(outfile, 'w') as f:
for line in iter(p.stdout.readline, b''):
print(line.rstrip().decode('ascii'))
f.write(str(line, encoding='ascii'))
p.stdout.close()
ret = p.wait()
else:
# For subprocesses >= 1, wait and dump their log file
ret = p.wait()
with open(outfile, 'r') as f:
print(''.join(f.readlines()))
assert ret == 0, 'Range subprocess failed (exit code: {})'.format(ret)
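# Illustrative usage sketch (not part of the original file): multi-GPU inference
# over `num_images` images might dispatch the per-GPU subprocesses like this; the
# tag, binary path and opts shown are hypothetical.
#
#   outputs = process_in_parallel(
#       'rel_detection', num_images,
#       binary='tools/test_net_rel.py',
#       output_dir=output_dir,
#       load_ckpt=args.load_ckpt,
#       load_detectron=None,
#       opts=['TEST.SCORE_THRESH', '0.00'])
#   # `outputs` is a list with one unpickled result per GPU subrange.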
| ContrastiveLosses4VRD-master | lib/utils_rel/subprocess_rel.py |
# Adapted by Ji Zhang in 2019 for this project
# Based on Detectron.pytorch/lib/utils/boxes.py
#
# Original license text below:
#
#############################################################################
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
#
# Based on:
# --------------------------------------------------------
# Fast/er R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Box manipulation functions. The internal Detectron box format is
[x1, y1, x2, y2] where (x1, y1) specify the top-left box corner and (x2, y2)
specify the bottom-right box corner. Boxes from external sources, e.g.,
datasets, may be in other formats (such as [x, y, w, h]) and require conversion.
This module uses a convention that may seem strange at first: the width of a box
is computed as x2 - x1 + 1 (likewise for height). The "+ 1" dates back to old
object detection days when the coordinates were integer pixel indices, rather
than floating point coordinates in a subpixel coordinate frame. A box with x2 =
x1 and y2 = y1 was taken to include a single pixel, having a width of 1, and
hence requiring the "+ 1". Now, most datasets will likely provide boxes with
floating point coordinates and the width should be more reasonably computed as
x2 - x1.
In practice, as long as a model is trained and tested with a consistent
convention either decision seems to be ok (at least in our experience on COCO).
Since we have a long history of training models with the "+ 1" convention, we
are reluctant to change it even if our modern tastes prefer not to use it.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import warnings
import numpy as np
from core.config import cfg
import utils_rel.cython_bbox_rel as cython_bbox_rel
from utils.boxes import bbox_transform_inv
bbox_pair_overlaps = cython_bbox_rel.bbox_pair_overlaps
def get_spt_features(boxes1, boxes2, width, height):
boxes_u = boxes_union(boxes1, boxes2)
spt_feat_1 = get_box_feature(boxes1, width, height)
spt_feat_2 = get_box_feature(boxes2, width, height)
spt_feat_12 = get_pair_feature(boxes1, boxes2)
spt_feat_1u = get_pair_feature(boxes1, boxes_u)
spt_feat_u2 = get_pair_feature(boxes_u, boxes2)
return np.hstack((spt_feat_12, spt_feat_1u, spt_feat_u2, spt_feat_1, spt_feat_2))
def get_pair_feature(boxes1, boxes2):
delta_1 = bbox_transform_inv(boxes1, boxes2)
delta_2 = bbox_transform_inv(boxes2, boxes1)
spt_feat = np.hstack((delta_1, delta_2[:, :2]))
return spt_feat
def get_box_feature(boxes, width, height):
f1 = boxes[:, 0] / width
f2 = boxes[:, 1] / height
f3 = boxes[:, 2] / width
f4 = boxes[:, 3] / height
f5 = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1) / (width * height)
return np.vstack((f1, f2, f3, f4, f5)).transpose()
def boxes_union(boxes1, boxes2):
assert boxes1.shape == boxes2.shape
xmin = np.minimum(boxes1[:, 0], boxes2[:, 0])
ymin = np.minimum(boxes1[:, 1], boxes2[:, 1])
xmax = np.maximum(boxes1[:, 2], boxes2[:, 2])
ymax = np.maximum(boxes1[:, 3], boxes2[:, 3])
return np.vstack((xmin, ymin, xmax, ymax)).transpose()
def rois_union(rois1, rois2):
assert (rois1[:, 0] == rois2[:, 0]).all()
xmin = np.minimum(rois1[:, 1], rois2[:, 1])
ymin = np.minimum(rois1[:, 2], rois2[:, 2])
xmax = np.maximum(rois1[:, 3], rois2[:, 3])
ymax = np.maximum(rois1[:, 4], rois2[:, 4])
return np.vstack((rois1[:, 0], xmin, ymin, xmax, ymax)).transpose()
def boxes_intersect(boxes1, boxes2):
assert boxes1.shape == boxes2.shape
xmin = np.maximum(boxes1[:, 0], boxes2[:, 0])
ymin = np.maximum(boxes1[:, 1], boxes2[:, 1])
xmax = np.minimum(boxes1[:, 2], boxes2[:, 2])
ymax = np.minimum(boxes1[:, 3], boxes2[:, 3])
return np.vstack((xmin, ymin, xmax, ymax)).transpose()
def rois_intersect(rois1, rois2):
assert (rois1[:, 0] == rois2[:, 0]).all()
xmin = np.maximum(rois1[:, 1], rois2[:, 1])
ymin = np.maximum(rois1[:, 2], rois2[:, 2])
xmax = np.minimum(rois1[:, 3], rois2[:, 3])
ymax = np.minimum(rois1[:, 4], rois2[:, 4])
return np.vstack((rois1[:, 0], xmin, ymin, xmax, ymax)).transpose()
def y1y2x1x2_to_x1y1x2y2(y1y2x1x2):
x1 = y1y2x1x2[2]
y1 = y1y2x1x2[0]
x2 = y1y2x1x2[3]
y2 = y1y2x1x2[1]
return [x1, y1, x2, y2]
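# Illustrative note (not part of the original file): per pair, get_spt_features()
# above concatenates
#   get_pair_feature(sbj, obj)   -> 6 dims (4 box deltas + 2),
#   get_pair_feature(sbj, union) -> 6 dims,
#   get_pair_feature(union, obj) -> 6 dims,
#   get_box_feature(sbj)         -> 5 dims,
#   get_box_feature(obj)         -> 5 dims,
# i.e. 28 dimensions in total. The total is inferred from the code above and is
# what the relation head's spatial-feature branch is assumed to expect.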
| ContrastiveLosses4VRD-master | lib/utils_rel/boxes_rel.py |
"""
Written by Ji Zhang, 2019
Some functions are adapted from Rowan Zellers
Original source:
https://github.com/rowanz/neural-motifs/blob/master/lib/evaluation/sg_eval.py
"""
import os
import numpy as np
import logging
from six.moves import cPickle as pickle
import json
import csv
from tqdm import tqdm
from core.config import cfg
from functools import reduce
from utils.boxes import bbox_overlaps
from datasets_rel.ap_eval_rel import ap_eval, prepare_mAP_dets
from .pytorch_misc import intersect_2d, argsort_desc
np.set_printoptions(precision=3)
logger = logging.getLogger(__name__)
def eval_rel_results(all_results, output_dir, do_val=True, do_vis=False, do_special=False):
topk = 100
if cfg.TEST.DATASETS[0].find('vg') >= 0:
eval_per_img = True
# eval_per_img = False
prd_k = 1
else:
eval_per_img = False
prd_k = 2
if cfg.TEST.DATASETS[0].find('oi') >= 0:
eval_ap = True
else:
eval_ap = False
if eval_per_img:
recalls = {1: [], 5: [], 10: [], 20: [], 50: [], 100: []}
else:
recalls = {1: 0, 5: 0, 10: 0, 20: 0, 50: 0, 100: 0}
if do_val:
all_gt_cnt = 0
if do_special:
special_img_f = open("/home/jiz/projects/100_img_special_set.txt", "r")
special_imgs = special_img_f.readlines()
special_imgs = [img[:-1] for img in special_imgs]
special_img_set = set(special_imgs)
logger.info('Special images len: {}'.format(len(special_img_set)))
topk_dets = []
for im_i, res in enumerate(tqdm(all_results)):
if do_special:
img_id = res['image'].split('/')[-1].split('.')[0]
if img_id not in special_img_set:
continue
# in oi_all_rel some images have no dets
if res['prd_scores'] is None:
det_boxes_s_top = np.zeros((0, 4), dtype=np.float32)
det_boxes_o_top = np.zeros((0, 4), dtype=np.float32)
det_labels_s_top = np.zeros(0, dtype=np.int32)
det_labels_p_top = np.zeros(0, dtype=np.int32)
det_labels_o_top = np.zeros(0, dtype=np.int32)
det_scores_top = np.zeros(0, dtype=np.float32)
det_scores_top_vis = np.zeros(0, dtype=np.float32)
if 'prd_scores_bias' in res:
det_scores_top_bias = np.zeros(0, dtype=np.float32)
if 'prd_scores_spt' in res:
det_scores_top_spt = np.zeros(0, dtype=np.float32)
else:
det_boxes_sbj = res['sbj_boxes'] # (#num_rel, 4)
det_boxes_obj = res['obj_boxes'] # (#num_rel, 4)
det_labels_sbj = res['sbj_labels'] # (#num_rel,)
det_labels_obj = res['obj_labels'] # (#num_rel,)
det_scores_sbj = res['sbj_scores'] # (#num_rel,)
det_scores_obj = res['obj_scores'] # (#num_rel,)
if 'prd_scores_ttl' in res:
det_scores_prd = res['prd_scores_ttl'][:, 1:]
else:
det_scores_prd = res['prd_scores'][:, 1:]
det_labels_prd = np.argsort(-det_scores_prd, axis=1)
det_scores_prd = -np.sort(-det_scores_prd, axis=1)
det_scores_so = det_scores_sbj * det_scores_obj
det_scores_spo = det_scores_so[:, None] * det_scores_prd[:, :prd_k]
det_scores_inds = argsort_desc(det_scores_spo)[:topk]
det_scores_top = det_scores_spo[det_scores_inds[:, 0], det_scores_inds[:, 1]]
det_boxes_so_top = np.hstack(
(det_boxes_sbj[det_scores_inds[:, 0]], det_boxes_obj[det_scores_inds[:, 0]]))
det_labels_p_top = det_labels_prd[det_scores_inds[:, 0], det_scores_inds[:, 1]]
det_labels_spo_top = np.vstack(
(det_labels_sbj[det_scores_inds[:, 0]], det_labels_p_top, det_labels_obj[det_scores_inds[:, 0]])).transpose()
# filter out bad relationships
cand_inds = np.where(det_scores_top > cfg.TEST.SPO_SCORE_THRESH)[0]
det_boxes_so_top = det_boxes_so_top[cand_inds]
det_labels_spo_top = det_labels_spo_top[cand_inds]
det_scores_top = det_scores_top[cand_inds]
det_scores_vis = res['prd_scores'][:, 1:]
for i in range(det_labels_prd.shape[0]):
det_scores_vis[i] = det_scores_vis[i][det_labels_prd[i]]
det_scores_vis = det_scores_vis[:, :prd_k]
det_scores_top_vis = det_scores_vis[det_scores_inds[:, 0], det_scores_inds[:, 1]]
det_scores_top_vis = det_scores_top_vis[cand_inds]
if 'prd_scores_bias' in res:
det_scores_bias = res['prd_scores_bias'][:, 1:]
for i in range(det_labels_prd.shape[0]):
det_scores_bias[i] = det_scores_bias[i][det_labels_prd[i]]
det_scores_bias = det_scores_bias[:, :prd_k]
det_scores_top_bias = det_scores_bias[det_scores_inds[:, 0], det_scores_inds[:, 1]]
det_scores_top_bias = det_scores_top_bias[cand_inds]
if 'prd_scores_spt' in res:
det_scores_spt = res['prd_scores_spt'][:, 1:]
for i in range(det_labels_prd.shape[0]):
det_scores_spt[i] = det_scores_spt[i][det_labels_prd[i]]
det_scores_spt = det_scores_spt[:, :prd_k]
det_scores_top_spt = det_scores_spt[det_scores_inds[:, 0], det_scores_inds[:, 1]]
det_scores_top_spt = det_scores_top_spt[cand_inds]
det_boxes_s_top = det_boxes_so_top[:, :4]
det_boxes_o_top = det_boxes_so_top[:, 4:]
det_labels_s_top = det_labels_spo_top[:, 0]
det_labels_p_top = det_labels_spo_top[:, 1]
det_labels_o_top = det_labels_spo_top[:, 2]
topk_dets.append(dict(image=res['image'],
det_boxes_s_top=det_boxes_s_top,
det_boxes_o_top=det_boxes_o_top,
det_labels_s_top=det_labels_s_top,
det_labels_p_top=det_labels_p_top,
det_labels_o_top=det_labels_o_top,
det_scores_top=det_scores_top))
topk_dets[-1]['det_scores_top_vis'] = det_scores_top_vis
if 'prd_scores_bias' in res:
topk_dets[-1]['det_scores_top_bias'] = det_scores_top_bias
if 'prd_scores_spt' in res:
topk_dets[-1]['det_scores_top_spt'] = det_scores_top_spt
if do_vis:
topk_dets[-1].update(dict(blob_conv=res['blob_conv'],
blob_conv_prd=res['blob_conv_prd']))
if do_val:
gt_boxes_sbj = res['gt_sbj_boxes'] # (#num_gt, 4)
gt_boxes_obj = res['gt_obj_boxes'] # (#num_gt, 4)
gt_labels_sbj = res['gt_sbj_labels'] # (#num_gt,)
gt_labels_obj = res['gt_obj_labels'] # (#num_gt,)
gt_labels_prd = res['gt_prd_labels'] # (#num_gt,)
gt_boxes_so = np.hstack((gt_boxes_sbj, gt_boxes_obj))
gt_labels_spo = np.vstack((gt_labels_sbj, gt_labels_prd, gt_labels_obj)).transpose()
# Compute recall. It's most efficient to match once and then do recall after
# det_boxes_so_top is (#num_rel, 8)
# det_labels_spo_top is (#num_rel, 3)
pred_to_gt = _compute_pred_matches(
gt_labels_spo, det_labels_spo_top,
gt_boxes_so, det_boxes_so_top)
if eval_per_img:
for k in recalls:
if len(pred_to_gt):
match = reduce(np.union1d, pred_to_gt[:k])
else:
match = []
rec_i = float(len(match)) / float(gt_labels_spo.shape[0] + 1e-12) # in case there is no gt
recalls[k].append(rec_i)
else:
all_gt_cnt += gt_labels_spo.shape[0]
for k in recalls:
if len(pred_to_gt):
match = reduce(np.union1d, pred_to_gt[:k])
else:
match = []
recalls[k] += len(match)
topk_dets[-1].update(dict(gt_boxes_sbj=gt_boxes_sbj,
gt_boxes_obj=gt_boxes_obj,
gt_labels_sbj=gt_labels_sbj,
gt_labels_obj=gt_labels_obj,
gt_labels_prd=gt_labels_prd))
if do_val:
if eval_per_img:
for k, v in recalls.items():
recalls[k] = np.mean(v)
else:
for k in recalls:
recalls[k] = float(recalls[k]) / (float(all_gt_cnt) + 1e-12)
excel_str = print_stats(recalls)
if eval_ap:
# prepare dets for each class
logger.info('Preparing dets for mAP...')
cls_image_ids, cls_dets, cls_gts, npos = prepare_mAP_dets(topk_dets, 9)
all_npos = sum(npos)
with open(cfg.DATA_DIR + '/openimages_v4/rel/rel_9_predicates.json') as f:
rel_prd_cats = json.load(f)
rel_mAP = 0.
w_rel_mAP = 0.
ap_str = ''
for c in range(9):
rec, prec, ap = ap_eval(cls_image_ids[c], cls_dets[c], cls_gts[c], npos[c], True)
weighted_ap = ap * float(npos[c]) / float(all_npos)
w_rel_mAP += weighted_ap
rel_mAP += ap
ap_str += '{:.2f}, '.format(100 * ap)
print('rel AP for class {}: {:.2f} ({:.6f})'.format(rel_prd_cats[c], 100 * ap, float(npos[c]) / float(all_npos)))
rel_mAP /= 9.
print('weighted rel mAP: {:.2f}'.format(100 * w_rel_mAP))
excel_str += ap_str
phr_mAP = 0.
w_phr_mAP = 0.
ap_str = ''
for c in range(9):
rec, prec, ap = ap_eval(cls_image_ids[c], cls_dets[c], cls_gts[c], npos[c], False)
weighted_ap = ap * float(npos[c]) / float(all_npos)
w_phr_mAP += weighted_ap
phr_mAP += ap
ap_str += '{:.2f}, '.format(100 * ap)
print('phr AP for class {}: {:.2f} ({:.6f})'.format(rel_prd_cats[c], 100 * ap, float(npos[c]) / float(all_npos)))
phr_mAP /= 9.
print('weighted phr mAP: {:.2f}'.format(100 * w_phr_mAP))
excel_str += ap_str
# total: 0.4 x rel_mAP + 0.2 x R@50 + 0.4 x phr_mAP
final_score = 0.4 * rel_mAP + 0.2 * recalls[50] + 0.4 * phr_mAP
# total: 0.4 x w_rel_mAP + 0.2 x R@50 + 0.4 x w_phr_mAP
w_final_score = 0.4 * w_rel_mAP + 0.2 * recalls[50] + 0.4 * w_phr_mAP
print('weighted final_score: {:.2f}'.format(100 * w_final_score))
# get excel friendly string
# excel_str = '{:.2f}, {:.2f}, {:.2f}, {:.2f}, '.format(100 * recalls[50], 100 * w_rel_mAP, 100 * w_phr_mAP, 100 * w_final_score) + excel_str
# print('Excel-friendly format:')
# print(excel_str.strip()[:-1])
# print('Saving topk dets...')
# topk_dets_f = os.path.join(output_dir, 'rel_detections_topk.pkl')
# with open(topk_dets_f, 'wb') as f:
# pickle.dump(topk_dets, f, pickle.HIGHEST_PROTOCOL)
# logger.info('topk_dets size: {}'.format(len(topk_dets)))
print('Done.')
def print_stats(recalls):
# print('====================== ' + 'sgdet' + ' ============================')
k_str = ''
for k in recalls.keys():
if k == 50:
continue
k_str += '{}\t'.format(k)
v_str = ''
for k, v in recalls.items():
print('R@%i: %.2f' % (k, 100 * v))
if k == 50:
continue
v_str += '{:.2f}, '.format(100 * v)
return v_str
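# Illustrative note (not part of the original file): `recalls` here is a dict keyed
# by K with recall values in [0, 1], e.g. {20: 0.31, 50: 0.42, 100: 0.47}; each is
# printed as "R@K", and the returned comma-separated string skips R@50, which
# eval_rel_results uses separately in the final-score formula above.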
# This function is adapted from Rowan Zellers' code:
# https://github.com/rowanz/neural-motifs/blob/master/lib/evaluation/sg_eval.py
# Modified for this project to work with PyTorch v0.4
def _compute_pred_matches(gt_triplets, pred_triplets,
gt_boxes, pred_boxes, iou_thresh=0.5, phrdet=False):
"""
Given a set of predicted triplets, return the list of matching GT's for each of the
given predictions
:param gt_triplets:
:param pred_triplets:
:param gt_boxes:
:param pred_boxes:
    :param iou_thresh: IoU threshold above which a predicted box counts as matching a GT box
:return:
"""
# This performs a matrix multiplication-esque thing between the two arrays
# Instead of summing, we want the equality, so we reduce in that way
# The rows correspond to GT triplets, columns to pred triplets
keeps = intersect_2d(gt_triplets, pred_triplets)
gt_has_match = keeps.any(1)
pred_to_gt = [[] for x in range(pred_boxes.shape[0])]
for gt_ind, gt_box, keep_inds in zip(np.where(gt_has_match)[0],
gt_boxes[gt_has_match],
keeps[gt_has_match],
):
boxes = pred_boxes[keep_inds]
if phrdet:
# Evaluate where the union box > 0.5
gt_box_union = gt_box.reshape((2, 4))
gt_box_union = np.concatenate((gt_box_union.min(0)[:2], gt_box_union.max(0)[2:]), 0)
box_union = boxes.reshape((-1, 2, 4))
box_union = np.concatenate((box_union.min(1)[:,:2], box_union.max(1)[:,2:]), 1)
gt_box_union = gt_box_union.astype(dtype=np.float32, copy=False)
box_union = box_union.astype(dtype=np.float32, copy=False)
            inds = bbox_overlaps(gt_box_union[None],
                box_union)[0] >= iou_thresh
else:
gt_box = gt_box.astype(dtype=np.float32, copy=False)
boxes = boxes.astype(dtype=np.float32, copy=False)
sub_iou = bbox_overlaps(gt_box[None,:4], boxes[:, :4])[0]
obj_iou = bbox_overlaps(gt_box[None,4:], boxes[:, 4:])[0]
inds = (sub_iou >= iou_thresh) & (obj_iou >= iou_thresh)
for i in np.where(keep_inds)[0][inds]:
pred_to_gt[i].append(int(gt_ind))
return pred_to_gt
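# Illustrative sketch (not part of the original file): pred_to_gt[i] holds the GT
# triplet indices matched by the i-th predicted triplet (predictions are assumed to
# be sorted by score). Recall@K is then the fraction of GT triplets covered by the
# union of matches over the top K predictions, e.g.:
#   match = reduce(np.union1d, pred_to_gt[:k]) if len(pred_to_gt) else []
#   recall_at_k = len(match) / float(num_gt)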
| ContrastiveLosses4VRD-master | lib/datasets_rel/task_evaluation_sg.py |
# Adapted from Detectron.pytorch/lib/datasets/roidb.py
# for this project by Ji Zhang, 2019
#
# --------------------------------------------------------
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Functions for common roidb manipulations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import six
import logging
import numpy as np
import utils.boxes as box_utils
import utils.blob as blob_utils
from core.config import cfg
from .json_dataset_rel import JsonDatasetRel
logger = logging.getLogger(__name__)
def combined_roidb_for_training(dataset_names, proposal_files):
"""Load and concatenate roidbs for one or more datasets, along with optional
object proposals. The roidb entries are then prepared for use in training,
which involves caching certain types of metadata for each roidb entry.
"""
def get_roidb(dataset_name, proposal_file):
ds = JsonDatasetRel(dataset_name)
roidb = ds.get_roidb(
gt=True,
proposal_file=proposal_file,
crowd_filter_thresh=cfg.TRAIN.CROWD_FILTER_THRESH
)
if cfg.TRAIN.USE_FLIPPED:
logger.info('Appending horizontally-flipped training examples...')
extend_with_flipped_entries(roidb, ds)
logger.info('Loaded dataset: {:s}'.format(ds.name))
return roidb
if isinstance(dataset_names, six.string_types):
dataset_names = (dataset_names, )
if isinstance(proposal_files, six.string_types):
proposal_files = (proposal_files, )
if len(proposal_files) == 0:
proposal_files = (None, ) * len(dataset_names)
assert len(dataset_names) == len(proposal_files)
roidbs = [get_roidb(*args) for args in zip(dataset_names, proposal_files)]
roidb = roidbs[0]
for r in roidbs[1:]:
roidb.extend(r)
roidb = filter_for_training(roidb)
if cfg.TRAIN.ASPECT_GROUPING or cfg.TRAIN.ASPECT_CROPPING:
logger.info('Computing image aspect ratios and ordering the ratios...')
ratio_list, ratio_index = rank_for_training(roidb)
logger.info('done')
else:
ratio_list, ratio_index = None, None
logger.info('Computing bounding-box regression targets...')
add_bbox_regression_targets(roidb)
logger.info('done')
_compute_and_log_stats(roidb)
return roidb, ratio_list, ratio_index
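# Illustrative usage sketch (not part of the original file; the dataset name must
# exist in dataset_catalog_rel.DATASETS and cfg must already be loaded):
#   roidb, ratio_list, ratio_index = combined_roidb_for_training(('vrd_train',), ())
#   # empty proposal_files -> ground-truth-only roidb, no precomputed proposals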
def extend_with_flipped_entries(roidb, dataset):
"""Flip each entry in the given roidb and return a new roidb that is the
concatenation of the original roidb and the flipped entries.
"Flipping" an entry means that that image and associated metadata (e.g.,
ground truth boxes and object proposals) are horizontally flipped.
"""
flipped_roidb = []
for entry in roidb:
width = entry['width']
boxes = entry['boxes'].copy()
oldx1 = boxes[:, 0].copy()
oldx2 = boxes[:, 2].copy()
boxes[:, 0] = width - oldx2 - 1
boxes[:, 2] = width - oldx1 - 1
assert (boxes[:, 2] >= boxes[:, 0]).all()
# sbj
sbj_gt_boxes = entry['sbj_gt_boxes'].copy()
oldx1 = sbj_gt_boxes[:, 0].copy()
oldx2 = sbj_gt_boxes[:, 2].copy()
sbj_gt_boxes[:, 0] = width - oldx2 - 1
sbj_gt_boxes[:, 2] = width - oldx1 - 1
assert (sbj_gt_boxes[:, 2] >= sbj_gt_boxes[:, 0]).all()
# obj
obj_gt_boxes = entry['obj_gt_boxes'].copy()
oldx1 = obj_gt_boxes[:, 0].copy()
oldx2 = obj_gt_boxes[:, 2].copy()
obj_gt_boxes[:, 0] = width - oldx2 - 1
obj_gt_boxes[:, 2] = width - oldx1 - 1
assert (obj_gt_boxes[:, 2] >= obj_gt_boxes[:, 0]).all()
# now flip
flipped_entry = {}
dont_copy = ('boxes', 'sbj_gt_boxes', 'obj_gt_boxes', 'segms', 'gt_keypoints', 'flipped')
for k, v in entry.items():
if k not in dont_copy:
flipped_entry[k] = v
flipped_entry['boxes'] = boxes
flipped_entry['sbj_gt_boxes'] = sbj_gt_boxes
flipped_entry['obj_gt_boxes'] = obj_gt_boxes
flipped_entry['flipped'] = True
flipped_roidb.append(flipped_entry)
roidb.extend(flipped_roidb)
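# Worked example (not part of the original file): for an image of width 640, a box
# with x1=100, x2=199 flips to x1 = 640 - 199 - 1 = 440 and x2 = 640 - 100 - 1 = 539,
# so the box width is preserved and x2 >= x1 still holds, as the asserts above check.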
def filter_for_training(roidb):
"""Remove roidb entries that have no usable RoIs based on config settings.
"""
def is_valid(entry):
# Valid images have:
# (1) At least one foreground RoI OR
# (2) At least one background RoI
overlaps = entry['max_overlaps']
# find boxes with sufficient overlap
fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) &
(overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
# image is only valid if such boxes exist
valid = len(fg_inds) > 0 or len(bg_inds) > 0
if cfg.MODEL.KEYPOINTS_ON:
# If we're training for keypoints, exclude images with no keypoints
valid = valid and entry['has_visible_keypoints']
return valid
num = len(roidb)
filtered_roidb = [entry for entry in roidb if is_valid(entry)]
num_after = len(filtered_roidb)
logger.info('Filtered {} roidb entries: {} -> {}'.
format(num - num_after, num, num_after))
return filtered_roidb
def rank_for_training(roidb):
"""Rank the roidb entries according to image aspect ration and mark for cropping
for efficient batching if image is too long.
Returns:
ratio_list: ndarray, list of aspect ratios from small to large
ratio_index: ndarray, list of roidb entry indices correspond to the ratios
"""
RATIO_HI = cfg.TRAIN.ASPECT_HI # largest ratio to preserve.
RATIO_LO = cfg.TRAIN.ASPECT_LO # smallest ratio to preserve.
need_crop_cnt = 0
ratio_list = []
for entry in roidb:
width = entry['width']
height = entry['height']
ratio = width / float(height)
if cfg.TRAIN.ASPECT_CROPPING:
if ratio > RATIO_HI:
entry['need_crop'] = True
ratio = RATIO_HI
need_crop_cnt += 1
elif ratio < RATIO_LO:
entry['need_crop'] = True
ratio = RATIO_LO
need_crop_cnt += 1
else:
entry['need_crop'] = False
else:
entry['need_crop'] = False
ratio_list.append(ratio)
if cfg.TRAIN.ASPECT_CROPPING:
logging.info('Number of entries that need to be cropped: %d. Ratio bound: [%.2f, %.2f]',
need_crop_cnt, RATIO_LO, RATIO_HI)
ratio_list = np.array(ratio_list)
ratio_index = np.argsort(ratio_list)
return ratio_list[ratio_index], ratio_index
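# Worked example (not part of the original file), assuming cfg.TRAIN.ASPECT_CROPPING
# is enabled with cfg.TRAIN.ASPECT_HI = 2 and cfg.TRAIN.ASPECT_LO = 0.5: a 1000x300
# image (ratio ~3.33) is clamped to 2 with need_crop=True, while a 600x400 image
# (ratio 1.5) keeps its ratio with need_crop=False; entries are then returned sorted
# by these (clamped) ratios.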
def add_bbox_regression_targets(roidb):
"""Add information needed to train bounding-box regressors."""
for entry in roidb:
entry['bbox_targets'] = _compute_targets(entry)
def _compute_targets(entry):
"""Compute bounding-box regression targets for an image."""
# Indices of ground-truth ROIs
rois = entry['boxes']
overlaps = entry['max_overlaps']
labels = entry['max_classes']
gt_inds = np.where((entry['gt_classes'] > 0) & (entry['is_crowd'] == 0))[0]
# Targets has format (class, tx, ty, tw, th)
targets = np.zeros((rois.shape[0], 5), dtype=np.float32)
if len(gt_inds) == 0:
# Bail if the image has no ground-truth ROIs
return targets
# Indices of examples for which we try to make predictions
ex_inds = np.where(overlaps >= cfg.TRAIN.BBOX_THRESH)[0]
# Get IoU overlap between each ex ROI and gt ROI
ex_gt_overlaps = box_utils.bbox_overlaps(
rois[ex_inds, :].astype(dtype=np.float32, copy=False),
rois[gt_inds, :].astype(dtype=np.float32, copy=False))
# Find which gt ROI each ex ROI has max overlap with:
# this will be the ex ROI's gt target
gt_assignment = ex_gt_overlaps.argmax(axis=1)
gt_rois = rois[gt_inds[gt_assignment], :]
ex_rois = rois[ex_inds, :]
# Use class "1" for all boxes if using class_agnostic_bbox_reg
targets[ex_inds, 0] = (
1 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else labels[ex_inds])
targets[ex_inds, 1:] = box_utils.bbox_transform_inv(
ex_rois, gt_rois, cfg.MODEL.BBOX_REG_WEIGHTS)
return targets
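# Illustrative note (not part of the original file): each row of the returned
# `targets` is (class, tx, ty, tw, th). Rows for RoIs whose max overlap is below
# cfg.TRAIN.BBOX_THRESH stay all-zero; for the remaining RoIs the deltas are computed
# against the best-overlapping GT box via box_utils.bbox_transform_inv.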
def _compute_and_log_stats(roidb):
classes = roidb[0]['dataset'].classes
char_len = np.max([len(c) for c in classes])
hist_bins = np.arange(len(classes) + 1)
# Histogram of ground-truth objects
gt_hist = np.zeros((len(classes)), dtype=np.int)
for entry in roidb:
gt_inds = np.where(
(entry['gt_classes'] > 0) & (entry['is_crowd'] == 0))[0]
gt_classes = entry['gt_classes'][gt_inds]
gt_hist += np.histogram(gt_classes, bins=hist_bins)[0]
logger.debug('Ground-truth class histogram:')
for i, v in enumerate(gt_hist):
logger.debug(
'{:d}{:s}: {:d}'.format(
i, classes[i].rjust(char_len), v))
logger.debug('-' * char_len)
logger.debug(
'{:s}: {:d}'.format(
'total'.rjust(char_len), np.sum(gt_hist)))
| ContrastiveLosses4VRD-master | lib/datasets_rel/roidb_rel.py |
"""
Written by Ji Zhang, 2019
Some functions are adapted from Rowan Zellers
Original source:
https://github.com/rowanz/neural-motifs/blob/master/lib/evaluation/sg_eval.py
"""
import os
import numpy as np
import logging
from six.moves import cPickle as pickle
import json
import csv
from tqdm import tqdm
from core.config import cfg
from functools import reduce
from utils.boxes import bbox_overlaps
from utils_rel.boxes_rel import boxes_union
from .pytorch_misc import intersect_2d, argsort_desc
np.set_printoptions(precision=3)
logger = logging.getLogger(__name__)
topk = 100
def eval_rel_results(all_results, output_dir, do_val):
if cfg.TEST.DATASETS[0].find('vg') >= 0:
prd_k_set = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20)
elif cfg.TEST.DATASETS[0].find('vrd') >= 0:
prd_k_set = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 70)
else:
prd_k_set = (1, 2, 3, 4, 5, 6, 7, 8, 9)
if cfg.TEST.DATASETS[0].find('vg') >= 0:
eval_sets = (False,)
else:
eval_sets = (False, True)
for phrdet in eval_sets:
eval_metric = 'phrdet' if phrdet else 'reldet'
print('================== {} =================='.format(eval_metric))
for prd_k in prd_k_set:
print('prd_k = {}:'.format(prd_k))
recalls = {20: 0, 50: 0, 100: 0}
if do_val:
all_gt_cnt = 0
topk_dets = []
for im_i, res in enumerate(tqdm(all_results)):
# in oi_all_rel some images have no dets
if res['prd_scores'] is None:
det_boxes_s_top = np.zeros((0, 4), dtype=np.float32)
det_boxes_o_top = np.zeros((0, 4), dtype=np.float32)
det_labels_s_top = np.zeros(0, dtype=np.int32)
det_labels_p_top = np.zeros(0, dtype=np.int32)
det_labels_o_top = np.zeros(0, dtype=np.int32)
det_scores_top = np.zeros(0, dtype=np.float32)
else:
det_boxes_sbj = res['sbj_boxes'] # (#num_rel, 4)
det_boxes_obj = res['obj_boxes'] # (#num_rel, 4)
det_labels_sbj = res['sbj_labels'] # (#num_rel,)
det_labels_obj = res['obj_labels'] # (#num_rel,)
det_scores_sbj = res['sbj_scores'] # (#num_rel,)
det_scores_obj = res['obj_scores'] # (#num_rel,)
if 'prd_scores_ttl' in res:
det_scores_prd = res['prd_scores_ttl'][:, 1:]
else:
det_scores_prd = res['prd_scores'][:, 1:]
det_labels_prd = np.argsort(-det_scores_prd, axis=1)
det_scores_prd = -np.sort(-det_scores_prd, axis=1)
det_scores_so = det_scores_sbj * det_scores_obj
det_scores_spo = det_scores_so[:, None] * det_scores_prd[:, :prd_k]
det_scores_inds = argsort_desc(det_scores_spo)[:topk]
det_scores_top = det_scores_spo[det_scores_inds[:, 0], det_scores_inds[:, 1]]
det_boxes_so_top = np.hstack(
(det_boxes_sbj[det_scores_inds[:, 0]], det_boxes_obj[det_scores_inds[:, 0]]))
det_labels_p_top = det_labels_prd[det_scores_inds[:, 0], det_scores_inds[:, 1]]
det_labels_spo_top = np.vstack(
(det_labels_sbj[det_scores_inds[:, 0]], det_labels_p_top, det_labels_obj[det_scores_inds[:, 0]])).transpose()
det_boxes_s_top = det_boxes_so_top[:, :4]
det_boxes_o_top = det_boxes_so_top[:, 4:]
det_labels_s_top = det_labels_spo_top[:, 0]
det_labels_p_top = det_labels_spo_top[:, 1]
det_labels_o_top = det_labels_spo_top[:, 2]
topk_dets.append(dict(image=res['image'],
det_boxes_s_top=det_boxes_s_top,
det_boxes_o_top=det_boxes_o_top,
det_labels_s_top=det_labels_s_top,
det_labels_p_top=det_labels_p_top,
det_labels_o_top=det_labels_o_top,
det_scores_top=det_scores_top))
if do_val:
gt_boxes_sbj = res['gt_sbj_boxes'] # (#num_gt, 4)
gt_boxes_obj = res['gt_obj_boxes'] # (#num_gt, 4)
gt_labels_sbj = res['gt_sbj_labels'] # (#num_gt,)
gt_labels_obj = res['gt_obj_labels'] # (#num_gt,)
gt_labels_prd = res['gt_prd_labels'] # (#num_gt,)
gt_boxes_so = np.hstack((gt_boxes_sbj, gt_boxes_obj))
gt_labels_spo = np.vstack((gt_labels_sbj, gt_labels_prd, gt_labels_obj)).transpose()
# Compute recall. It's most efficient to match once and then do recall after
# det_boxes_so_top is (#num_rel, 8)
# det_labels_spo_top is (#num_rel, 3)
if phrdet:
det_boxes_r_top = boxes_union(det_boxes_s_top, det_boxes_o_top)
gt_boxes_r = boxes_union(gt_boxes_sbj, gt_boxes_obj)
pred_to_gt = _compute_pred_matches(
gt_labels_spo, det_labels_spo_top,
gt_boxes_r, det_boxes_r_top,
phrdet=phrdet)
else:
pred_to_gt = _compute_pred_matches(
gt_labels_spo, det_labels_spo_top,
gt_boxes_so, det_boxes_so_top,
phrdet=phrdet)
all_gt_cnt += gt_labels_spo.shape[0]
for k in recalls:
if len(pred_to_gt):
match = reduce(np.union1d, pred_to_gt[:k])
else:
match = []
recalls[k] += len(match)
topk_dets[-1].update(dict(gt_boxes_sbj=gt_boxes_sbj,
gt_boxes_obj=gt_boxes_obj,
gt_labels_sbj=gt_labels_sbj,
gt_labels_obj=gt_labels_obj,
gt_labels_prd=gt_labels_prd))
if do_val:
for k in recalls:
recalls[k] = float(recalls[k]) / (float(all_gt_cnt) + 1e-12)
print_stats(recalls)
def print_stats(recalls):
# print('====================== ' + 'sgdet' + ' ============================')
for k, v in recalls.items():
print('R@%i: %.2f' % (k, 100 * v))
# This function is adapted from Rowan Zellers' code:
# https://github.com/rowanz/neural-motifs/blob/master/lib/evaluation/sg_eval.py
# Modified for this project to work with PyTorch v0.4
def _compute_pred_matches(gt_triplets, pred_triplets,
gt_boxes, pred_boxes, iou_thresh=0.5, phrdet=False):
"""
Given a set of predicted triplets, return the list of matching GT's for each of the
given predictions
:param gt_triplets:
:param pred_triplets:
:param gt_boxes:
:param pred_boxes:
:param iou_thresh:
:return:
"""
# This performs a matrix multiplication-esque thing between the two arrays
# Instead of summing, we want the equality, so we reduce in that way
# The rows correspond to GT triplets, columns to pred triplets
keeps = intersect_2d(gt_triplets, pred_triplets)
gt_has_match = keeps.any(1)
pred_to_gt = [[] for x in range(pred_boxes.shape[0])]
for gt_ind, gt_box, keep_inds in zip(np.where(gt_has_match)[0],
gt_boxes[gt_has_match],
keeps[gt_has_match],
):
boxes = pred_boxes[keep_inds]
if phrdet:
gt_box = gt_box.astype(dtype=np.float32, copy=False)
boxes = boxes.astype(dtype=np.float32, copy=False)
rel_iou = bbox_overlaps(gt_box[None, :], boxes)[0]
inds = rel_iou >= iou_thresh
else:
gt_box = gt_box.astype(dtype=np.float32, copy=False)
boxes = boxes.astype(dtype=np.float32, copy=False)
sub_iou = bbox_overlaps(gt_box[None,:4], boxes[:, :4])[0]
obj_iou = bbox_overlaps(gt_box[None,4:], boxes[:, 4:])[0]
inds = (sub_iou >= iou_thresh) & (obj_iou >= iou_thresh)
for i in np.where(keep_inds)[0][inds]:
pred_to_gt[i].append(int(gt_ind))
return pred_to_gt
| ContrastiveLosses4VRD-master | lib/datasets_rel/task_evaluation_vg_and_vrd.py |
| ContrastiveLosses4VRD-master | lib/datasets_rel/__init__.py |
# Adapted from Detectron.pytorch/lib/datasets/dataset_catalog.py
# for this project by Ji Zhang,2019
#-----------------------------------------------------------------------------
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Collection of available datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
from core.config import cfg
# Path to data dir
_DATA_DIR = cfg.DATA_DIR
# Required dataset entry keys
IM_DIR = 'image_directory'
ANN_FN = 'annotation_file'
ANN_FN2 = 'annotation_file2'
ANN_FN3 = 'predicate_file'
# Optional dataset entry keys
IM_PREFIX = 'image_prefix'
DEVKIT_DIR = 'devkit_directory'
RAW_DIR = 'raw_dir'
# Available datasets
DATASETS = {
# OpenImages_v4 rel dataset for relationship task
'oi_rel_train': {
IM_DIR:
_DATA_DIR + '/openimages_v4/train',
ANN_FN:
_DATA_DIR + '/openimages_v4/rel/detections_train.json',
ANN_FN2:
_DATA_DIR + '/openimages_v4/rel/rel_only_annotations_train.json',
ANN_FN3:
_DATA_DIR + '/openimages_v4/rel/rel_9_predicates.json',
},
'oi_rel_train_mini': {
IM_DIR:
_DATA_DIR + '/openimages_v4/train',
ANN_FN:
_DATA_DIR + '/openimages_v4/rel/detections_train.json',
ANN_FN2:
_DATA_DIR + '/openimages_v4/rel/rel_only_annotations_train_mini.json',
ANN_FN3:
_DATA_DIR + '/openimages_v4/rel/rel_9_predicates.json',
},
'oi_rel_val': {
IM_DIR:
_DATA_DIR + '/openimages_v4/train',
ANN_FN:
_DATA_DIR + '/openimages_v4/rel/detections_val.json',
ANN_FN2:
_DATA_DIR + '/openimages_v4/rel/rel_only_annotations_val.json',
ANN_FN3:
_DATA_DIR + '/openimages_v4/rel/rel_9_predicates.json',
},
'oi_rel_val_mini': {
IM_DIR:
_DATA_DIR + '/openimages_v4/train',
ANN_FN:
_DATA_DIR + '/openimages_v4/rel/detections_val.json',
ANN_FN2:
_DATA_DIR + '/openimages_v4/rel/rel_only_annotations_val_mini.json',
ANN_FN3:
_DATA_DIR + '/openimages_v4/rel/rel_9_predicates.json',
},
# for Kaggle test
'oi_kaggle_rel_test': {
IM_DIR:
_DATA_DIR + '/openimages_v4/rel/kaggle_test_images/challenge2018_test',
ANN_FN: # pseudo annotation
_DATA_DIR + '/openimages_v4/rel/kaggle_test_images/detections_test.json',
ANN_FN2:
_DATA_DIR + '/openimages_v4/rel/kaggle_test_images/all_rel_only_annotations_test.json',
ANN_FN3:
_DATA_DIR + '/openimages_v4/rel/rel_9_predicates.json',
},
# VG dataset
'vg_train': {
IM_DIR:
_DATA_DIR + '/vg/VG_100K',
ANN_FN:
_DATA_DIR + '/vg/detections_train.json',
ANN_FN2:
_DATA_DIR + '/vg/rel_annotations_train.json',
ANN_FN3:
_DATA_DIR + '/vg/predicates.json',
},
'vg_val': {
IM_DIR:
_DATA_DIR + '/vg/VG_100K',
ANN_FN:
_DATA_DIR + '/vg/detections_val.json',
ANN_FN2:
_DATA_DIR + '/vg/rel_annotations_val.json',
ANN_FN3:
_DATA_DIR + '/vg/predicates.json',
},
# VRD dataset
'vrd_train': {
IM_DIR:
_DATA_DIR + '/vrd/train_images',
ANN_FN:
_DATA_DIR + '/vrd/detections_train.json',
ANN_FN2:
_DATA_DIR + '/vrd/new_annotations_train.json',
ANN_FN3:
_DATA_DIR + '/vrd/predicates.json',
},
'vrd_val': {
IM_DIR:
_DATA_DIR + '/vrd/val_images',
ANN_FN:
_DATA_DIR + '/vrd/detections_val.json',
ANN_FN2:
_DATA_DIR + '/vrd/new_annotations_val.json',
ANN_FN3:
_DATA_DIR + '/vrd/predicates.json',
},
}
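# Illustrative usage sketch (not part of the original file): entries are looked up
# by dataset name and key, e.g.
#   im_dir = DATASETS['vrd_train'][IM_DIR]
#   rel_ann_file = DATASETS['vrd_train'][ANN_FN2]
# JsonDatasetRel (json_dataset_rel.py) reads these paths when building the roidb.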
| ContrastiveLosses4VRD-master | lib/datasets_rel/dataset_catalog_rel.py |
# Adapted from Detectron.pytorch/lib/datasets/voc_eval.py for
# this project by Ji Zhang, 2019
#-----------------------------------------------------------------------------
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
#
# Based on:
# --------------------------------------------------------
# Fast/er R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Bharath Hariharan
# --------------------------------------------------------
"""relationship AP evaluation code."""
from six.moves import cPickle as pickle
import logging
import numpy as np
import os
from tqdm import tqdm
from utils.boxes import bbox_overlaps
from utils_rel.boxes_rel import boxes_union
logger = logging.getLogger(__name__)
def prepare_mAP_dets(topk_dets, cls_num):
cls_image_ids = [[] for _ in range(cls_num)]
cls_dets = [{'confidence': np.empty(0),
'BB_s': np.empty((0, 4)),
'BB_o': np.empty((0, 4)),
'BB_r': np.empty((0, 4)),
'LBL_s': np.empty(0),
'LBL_o': np.empty(0)} for _ in range(cls_num)]
cls_gts = [{} for _ in range(cls_num)]
npos = [0 for _ in range(cls_num)]
for dets in tqdm(topk_dets):
image_id = dets['image'].split('/')[-1].split('.')[0]
sbj_boxes = dets['det_boxes_s_top']
obj_boxes = dets['det_boxes_o_top']
rel_boxes = boxes_union(sbj_boxes, obj_boxes)
sbj_labels = dets['det_labels_s_top']
obj_labels = dets['det_labels_o_top']
prd_labels = dets['det_labels_p_top']
det_scores = dets['det_scores_top']
gt_boxes_sbj = dets['gt_boxes_sbj']
gt_boxes_obj = dets['gt_boxes_obj']
gt_boxes_rel = boxes_union(gt_boxes_sbj, gt_boxes_obj)
gt_labels_sbj = dets['gt_labels_sbj']
gt_labels_prd = dets['gt_labels_prd']
gt_labels_obj = dets['gt_labels_obj']
for c in range(cls_num):
cls_inds = np.where(prd_labels == c)[0]
# logger.info(cls_inds)
if len(cls_inds):
cls_sbj_boxes = sbj_boxes[cls_inds]
cls_obj_boxes = obj_boxes[cls_inds]
cls_rel_boxes = rel_boxes[cls_inds]
cls_sbj_labels = sbj_labels[cls_inds]
cls_obj_labels = obj_labels[cls_inds]
cls_det_scores = det_scores[cls_inds]
cls_dets[c]['confidence'] = np.concatenate((cls_dets[c]['confidence'], cls_det_scores))
cls_dets[c]['BB_s'] = np.concatenate((cls_dets[c]['BB_s'], cls_sbj_boxes), 0)
cls_dets[c]['BB_o'] = np.concatenate((cls_dets[c]['BB_o'], cls_obj_boxes), 0)
cls_dets[c]['BB_r'] = np.concatenate((cls_dets[c]['BB_r'], cls_rel_boxes), 0)
cls_dets[c]['LBL_s'] = np.concatenate((cls_dets[c]['LBL_s'], cls_sbj_labels))
cls_dets[c]['LBL_o'] = np.concatenate((cls_dets[c]['LBL_o'], cls_obj_labels))
cls_image_ids[c] += [image_id] * len(cls_inds)
cls_gt_inds = np.where(gt_labels_prd == c)[0]
cls_gt_boxes_sbj = gt_boxes_sbj[cls_gt_inds]
cls_gt_boxes_obj = gt_boxes_obj[cls_gt_inds]
cls_gt_boxes_rel = gt_boxes_rel[cls_gt_inds]
cls_gt_labels_sbj = gt_labels_sbj[cls_gt_inds]
cls_gt_labels_obj = gt_labels_obj[cls_gt_inds]
cls_gt_num = len(cls_gt_inds)
det = [False] * cls_gt_num
npos[c] = npos[c] + cls_gt_num
cls_gts[c][image_id] = {'gt_boxes_sbj': cls_gt_boxes_sbj,
'gt_boxes_obj': cls_gt_boxes_obj,
'gt_boxes_rel': cls_gt_boxes_rel,
'gt_labels_sbj': cls_gt_labels_sbj,
'gt_labels_obj': cls_gt_labels_obj,
'gt_num': cls_gt_num,
'det': det}
return cls_image_ids, cls_dets, cls_gts, npos
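# Illustrative note (not part of the original file): for each predicate class c,
# cls_dets[c] concatenates detections over all images (confidences plus subject,
# object and union boxes and their labels), cls_image_ids[c] records the source
# image of each detection, cls_gts[c] maps image id -> that image's GT pairs for
# class c, and npos[c] is the GT count used as the recall denominator in ap_eval.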
def get_ap(rec, prec):
"""Compute AP given precision and recall.
"""
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
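# Worked example (not part of the original file): with
#   rec  = np.array([0.1, 0.4, 0.4, 0.8])
#   prec = np.array([1.0, 0.8, 0.6, 0.5])
# the precision envelope over mrec = [0, 0.1, 0.4, 0.4, 0.8, 1.0] becomes
# [1.0, 1.0, 0.8, 0.6, 0.5, 0.0], and
#   get_ap(rec, prec) == 0.1*1.0 + 0.3*0.8 + 0.4*0.5 + 0.2*0.0 == 0.54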
def ap_eval(image_ids,
dets,
gts,
npos,
rel_or_phr=True,
ovthresh=0.5):
"""
Top level function that does the relationship AP evaluation.
    image_ids: list of image ids, one entry per detection (aligned with dets)
    dets: dict of detection arrays for one predicate class
        ('confidence', 'BB_s', 'BB_o', 'BB_r', 'LBL_s', 'LBL_o')
    gts: dict mapping image id to that image's ground-truth pairs for this class
    npos: total number of ground-truth positives for this class
    rel_or_phr: True to evaluate relationship AP, False to evaluate phrase AP
    [ovthresh]: Overlap threshold (default = 0.5)
"""
confidence = dets['confidence']
BB_s = dets['BB_s']
BB_o = dets['BB_o']
BB_r = dets['BB_r']
LBL_s = dets['LBL_s']
LBL_o = dets['LBL_o']
# sort by confidence
sorted_ind = np.argsort(-confidence)
BB_s = BB_s[sorted_ind, :]
BB_o = BB_o[sorted_ind, :]
BB_r = BB_r[sorted_ind, :]
LBL_s = LBL_s[sorted_ind]
LBL_o = LBL_o[sorted_ind]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
gts_visited = {k: [False] * v['gt_num'] for k, v in gts.items()}
for d in range(nd):
R = gts[image_ids[d]]
visited = gts_visited[image_ids[d]]
bb_s = BB_s[d, :].astype(float)
bb_o = BB_o[d, :].astype(float)
bb_r = BB_r[d, :].astype(float)
lbl_s = LBL_s[d]
lbl_o = LBL_o[d]
ovmax = -np.inf
BBGT_s = R['gt_boxes_sbj'].astype(float)
BBGT_o = R['gt_boxes_obj'].astype(float)
BBGT_r = R['gt_boxes_rel'].astype(float)
LBLGT_s = R['gt_labels_sbj']
LBLGT_o = R['gt_labels_obj']
if BBGT_s.size > 0:
valid_mask = np.logical_and(LBLGT_s == lbl_s, LBLGT_o == lbl_o)
if valid_mask.any():
if rel_or_phr: # means it is evaluating relationships
overlaps_s = bbox_overlaps(
bb_s[None, :].astype(dtype=np.float32, copy=False),
BBGT_s.astype(dtype=np.float32, copy=False))[0]
overlaps_o = bbox_overlaps(
bb_o[None, :].astype(dtype=np.float32, copy=False),
BBGT_o.astype(dtype=np.float32, copy=False))[0]
overlaps = np.minimum(overlaps_s, overlaps_o)
else:
overlaps = bbox_overlaps(
bb_r[None, :].astype(dtype=np.float32, copy=False),
BBGT_r.astype(dtype=np.float32, copy=False))[0]
overlaps *= valid_mask
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
else:
ovmax = 0.
jmax = -1
if ovmax > ovthresh:
if not visited[jmax]:
tp[d] = 1.
visited[jmax] = 1
else:
fp[d] = 1.
else:
fp[d] = 1.
# compute precision recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / (float(npos) + 1e-12)
    # avoid divide by zero in case the first detection matches a difficult ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = get_ap(rec, prec)
return rec, prec, ap
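# Illustrative usage sketch (not part of the original file), tying the two steps
# together for one predicate class c, as done in task_evaluation_sg.py:
#   cls_image_ids, cls_dets, cls_gts, npos = prepare_mAP_dets(topk_dets, cls_num)
#   rec, prec, ap = ap_eval(cls_image_ids[c], cls_dets[c], cls_gts[c], npos[c],
#                           True)   # True: relationship AP; False: phrase AP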
| ContrastiveLosses4VRD-master | lib/datasets_rel/ap_eval_rel.py |
# This file is from https://github.com/rowanz/neural-motifs/blob/master/lib/pytorch_misc.py
# Unused imports and functions are deleted
"""
Miscellaneous functions that might be useful for pytorch
"""
import numpy as np
def intersect_2d(x1, x2):
"""
Given two arrays [m1, n], [m2,n], returns a [m1, m2] array where each entry is True if those
rows match.
:param x1: [m1, n] numpy array
:param x2: [m2, n] numpy array
:return: [m1, m2] bool array of the intersections
"""
if x1.shape[1] != x2.shape[1]:
raise ValueError("Input arrays must have same #columns")
# This performs a matrix multiplication-esque thing between the two arrays
# Instead of summing, we want the equality, so we reduce in that way
res = (x1[..., None] == x2.T[None, ...]).all(1)
return res
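# Worked example (not part of the original file):
#   x1 = np.array([[1, 2, 3], [4, 5, 6]])
#   x2 = np.array([[4, 5, 6], [1, 2, 3], [1, 2, 0]])
#   intersect_2d(x1, x2)
#   # -> [[False,  True, False],
#   #     [ True, False, False]]
# i.e. entry [i, j] is True iff row i of x1 equals row j of x2 element-wise.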
def argsort_desc(scores):
"""
Returns the indices that sort scores descending in a smart way
:param scores: Numpy array of arbitrary size
    :return: an array of shape [numel(scores), ndim(scores)] where each row is the
    multi-dimensional index of the corresponding score, from highest to lowest score.
"""
return np.column_stack(np.unravel_index(np.argsort(-scores.ravel()), scores.shape)) | ContrastiveLosses4VRD-master | lib/datasets_rel/pytorch_misc.py |
# Adapted from Detectron.pytorch/lib/datasets/json_dataset.py
# for this project by Ji Zhang, 2019
#-----------------------------------------------------------------------------
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Representation of the standard COCO json dataset format.
When working with a new dataset, we strongly suggest converting the dataset into
the COCO json format and use the existing code; it is not recommended to write
code to support new dataset formats.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
from six.moves import cPickle as pickle
import logging
import numpy as np
import os
import scipy.sparse
import json
# Must happen before importing COCO API (which imports matplotlib)
import utils.env as envu
envu.set_up_matplotlib()
# COCO API
from pycocotools import mask as COCOmask
from pycocotools.coco import COCO
import utils.boxes as box_utils
import utils_rel.boxes_rel as box_utils_rel
from core.config import cfg
from utils.timer import Timer
from .dataset_catalog_rel import ANN_FN
from .dataset_catalog_rel import ANN_FN2
from .dataset_catalog_rel import ANN_FN3
from .dataset_catalog_rel import DATASETS
from .dataset_catalog_rel import IM_DIR
from .dataset_catalog_rel import IM_PREFIX
logger = logging.getLogger(__name__)
class JsonDatasetRel(object):
"""A class representing a COCO json dataset."""
def __init__(self, name):
assert name in DATASETS.keys(), \
'Unknown dataset name: {}'.format(name)
assert os.path.exists(DATASETS[name][IM_DIR]), \
'Image directory \'{}\' not found'.format(DATASETS[name][IM_DIR])
assert os.path.exists(DATASETS[name][ANN_FN]), \
'Annotation file \'{}\' not found'.format(DATASETS[name][ANN_FN])
logger.debug('Creating: {}'.format(name))
self.name = name
self.image_directory = DATASETS[name][IM_DIR]
self.image_prefix = (
'' if IM_PREFIX not in DATASETS[name] else DATASETS[name][IM_PREFIX]
)
self.COCO = COCO(DATASETS[name][ANN_FN])
self.debug_timer = Timer()
# Set up dataset classes
category_ids = self.COCO.getCatIds()
categories = [c['name'] for c in self.COCO.loadCats(category_ids)]
self.category_to_id_map = dict(zip(categories, category_ids))
self.classes = ['__background__'] + categories
self.num_classes = len(self.classes)
self.json_category_id_to_contiguous_id = {
v: i + 1
for i, v in enumerate(self.COCO.getCatIds())
}
self.contiguous_category_id_to_json_id = {
v: k
for k, v in self.json_category_id_to_contiguous_id.items()
}
self._init_keypoints()
assert ANN_FN2 in DATASETS[name] and ANN_FN3 in DATASETS[name]
with open(DATASETS[name][ANN_FN2]) as f:
self.rel_anns = json.load(f)
with open(DATASETS[name][ANN_FN3]) as f:
prd_categories = json.load(f)
self.obj_classes = self.classes[1:] # excludes background for now
self.num_obj_classes = len(self.obj_classes)
# self.prd_classes = ['__background__'] + prd_categories
self.prd_classes = prd_categories # excludes background for now
self.num_prd_classes = len(self.prd_classes)
@property
def cache_path(self):
cache_path = os.path.abspath(os.path.join(cfg.DATA_DIR, 'cache'))
if not os.path.exists(cache_path):
os.makedirs(cache_path)
return cache_path
@property
def valid_cached_keys(self):
""" Can load following key-ed values from the cached roidb file
'image'(image path) and 'flipped' values are already filled on _prep_roidb_entry,
so we don't need to overwrite it again.
"""
keys = ['dataset_name',
'boxes', 'segms', 'gt_classes', 'seg_areas', 'gt_overlaps',
'is_crowd', 'box_to_gt_ind_map',
'sbj_gt_boxes', 'sbj_gt_classes', 'obj_gt_boxes', 'obj_gt_classes', 'prd_gt_classes',
'sbj_gt_overlaps', 'obj_gt_overlaps', 'prd_gt_overlaps', 'pair_to_gt_ind_map']
if self.keypoints is not None:
keys += ['gt_keypoints', 'has_visible_keypoints']
return keys
def get_roidb(
self,
gt=False,
proposal_file=None,
min_proposal_size=2,
proposal_limit=-1,
crowd_filter_thresh=0
):
"""Return an roidb corresponding to the json dataset. Optionally:
- include ground truth boxes in the roidb
- add proposals specified in a proposals file
- filter proposals based on a minimum side length
- filter proposals that intersect with crowd regions
"""
assert gt is True or crowd_filter_thresh == 0, \
'Crowd filter threshold must be 0 if ground-truth annotations ' \
'are not included.'
image_ids = self.COCO.getImgIds()
image_ids.sort()
if cfg.DEBUG:
roidb = copy.deepcopy(self.COCO.loadImgs(image_ids))[:100]
else:
roidb = copy.deepcopy(self.COCO.loadImgs(image_ids))
new_roidb = []
for entry in roidb:
            # In OpenImages_v4 there are more detection-annotated images than
            # relationship-annotated images, hence the need to check
if entry['file_name'] in self.rel_anns:
self._prep_roidb_entry(entry)
new_roidb.append(entry)
roidb = new_roidb
if gt:
# Include ground-truth object annotations
cache_filepath = os.path.join(self.cache_path, self.name + '_rel_gt_roidb.pkl')
if os.path.exists(cache_filepath) and not cfg.DEBUG:
self.debug_timer.tic()
self._add_gt_from_cache(roidb, cache_filepath)
logger.debug(
'_add_gt_from_cache took {:.3f}s'.
format(self.debug_timer.toc(average=False))
)
else:
self.debug_timer.tic()
for entry in roidb:
self._add_gt_annotations(entry)
logger.debug(
'_add_gt_annotations took {:.3f}s'.
format(self.debug_timer.toc(average=False))
)
if not cfg.DEBUG:
with open(cache_filepath, 'wb') as fp:
pickle.dump(roidb, fp, pickle.HIGHEST_PROTOCOL)
logger.info('Cache ground truth roidb to %s', cache_filepath)
if proposal_file is not None:
# Include proposals from a file
self.debug_timer.tic()
self._add_proposals_from_file(
roidb, proposal_file, min_proposal_size, proposal_limit,
crowd_filter_thresh
)
logger.debug(
'_add_proposals_from_file took {:.3f}s'.
format(self.debug_timer.toc(average=False))
)
_add_class_assignments(roidb)
return roidb
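    # Illustrative usage sketch (not part of the original file; assumes cfg and the
    # catalog paths in dataset_catalog_rel.py are set up):
    #   ds = JsonDatasetRel('vrd_train')
    #   roidb = ds.get_roidb(gt=True)   # adds GT boxes plus relationship annotations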
def _prep_roidb_entry(self, entry):
"""Adds empty metadata fields to an roidb entry."""
# Reference back to the parent dataset
entry['dataset'] = self
# Make file_name an abs path
im_path = os.path.join(
self.image_directory, self.image_prefix + entry['file_name']
)
assert os.path.exists(im_path), 'Image \'{}\' not found'.format(im_path)
entry['image'] = im_path
entry['flipped'] = False
entry['has_visible_keypoints'] = False
# Empty placeholders
entry['boxes'] = np.empty((0, 4), dtype=np.float32)
entry['segms'] = []
entry['gt_classes'] = np.empty((0), dtype=np.int32)
entry['seg_areas'] = np.empty((0), dtype=np.float32)
entry['gt_overlaps'] = scipy.sparse.csr_matrix(
np.empty((0, self.num_classes), dtype=np.float32)
)
entry['is_crowd'] = np.empty((0), dtype=np.bool)
# 'box_to_gt_ind_map': Shape is (#rois). Maps from each roi to the index
# in the list of rois that satisfy np.where(entry['gt_classes'] > 0)
entry['box_to_gt_ind_map'] = np.empty((0), dtype=np.int32)
if self.keypoints is not None:
entry['gt_keypoints'] = np.empty(
(0, 3, self.num_keypoints), dtype=np.int32
)
# Remove unwanted fields that come from the json file (if they exist)
for k in ['date_captured', 'url', 'license']:
if k in entry:
del entry[k]
entry['dataset_name'] = ''
# add relationship annotations
# sbj
entry['sbj_gt_boxes'] = np.empty((0, 4), dtype=np.float32)
entry['sbj_gt_classes'] = np.empty((0), dtype=np.int32)
entry['sbj_gt_overlaps'] = scipy.sparse.csr_matrix(
np.empty((0, self.num_obj_classes), dtype=np.float32)
)
# entry['sbj_box_to_gt_ind_map'] = np.empty((0), dtype=np.int32)
# obj
entry['obj_gt_boxes'] = np.empty((0, 4), dtype=np.float32)
entry['obj_gt_classes'] = np.empty((0), dtype=np.int32)
entry['obj_gt_overlaps'] = scipy.sparse.csr_matrix(
np.empty((0, self.num_obj_classes), dtype=np.float32)
)
# entry['obj_box_to_gt_ind_map'] = np.empty((0), dtype=np.int32)
# prd
entry['prd_gt_classes'] = np.empty((0), dtype=np.int32)
entry['prd_gt_overlaps'] = scipy.sparse.csr_matrix(
np.empty((0, self.num_prd_classes), dtype=np.float32)
)
entry['pair_to_gt_ind_map'] = np.empty((0), dtype=np.int32)
def _add_gt_annotations(self, entry):
"""Add ground truth annotation metadata to an roidb entry."""
ann_ids = self.COCO.getAnnIds(imgIds=entry['id'], iscrowd=None)
objs = self.COCO.loadAnns(ann_ids)
# Sanitize bboxes -- some are invalid
valid_objs = []
valid_segms = []
width = entry['width']
height = entry['height']
for obj in objs:
if obj['area'] < cfg.TRAIN.GT_MIN_AREA:
continue
if 'ignore' in obj and obj['ignore'] == 1:
continue
# Convert form (x1, y1, w, h) to (x1, y1, x2, y2)
x1, y1, x2, y2 = box_utils.xywh_to_xyxy(obj['bbox'])
x1, y1, x2, y2 = box_utils.clip_xyxy_to_image(
x1, y1, x2, y2, height, width
)
# Require non-zero seg area and more than 1x1 box size
if obj['area'] > 0 and x2 > x1 and y2 > y1:
obj['clean_bbox'] = [x1, y1, x2, y2]
valid_objs.append(obj)
# valid_segms.append(obj['segmentation'])
num_valid_objs = len(valid_objs)
boxes = np.zeros((num_valid_objs, 4), dtype=entry['boxes'].dtype)
gt_classes = np.zeros((num_valid_objs), dtype=entry['gt_classes'].dtype)
gt_overlaps = np.zeros(
(num_valid_objs, self.num_classes),
dtype=entry['gt_overlaps'].dtype
)
seg_areas = np.zeros((num_valid_objs), dtype=entry['seg_areas'].dtype)
is_crowd = np.zeros((num_valid_objs), dtype=entry['is_crowd'].dtype)
box_to_gt_ind_map = np.zeros(
(num_valid_objs), dtype=entry['box_to_gt_ind_map'].dtype
)
if self.keypoints is not None:
gt_keypoints = np.zeros(
(num_valid_objs, 3, self.num_keypoints),
dtype=entry['gt_keypoints'].dtype
)
im_has_visible_keypoints = False
for ix, obj in enumerate(valid_objs):
cls = self.json_category_id_to_contiguous_id[obj['category_id']]
boxes[ix, :] = obj['clean_bbox']
gt_classes[ix] = cls
seg_areas[ix] = obj['area']
is_crowd[ix] = obj['iscrowd']
box_to_gt_ind_map[ix] = ix
if self.keypoints is not None:
gt_keypoints[ix, :, :] = self._get_gt_keypoints(obj)
if np.sum(gt_keypoints[ix, 2, :]) > 0:
im_has_visible_keypoints = True
if obj['iscrowd']:
# Set overlap to -1 for all classes for crowd objects
# so they will be excluded during training
gt_overlaps[ix, :] = -1.0
else:
gt_overlaps[ix, cls] = 1.0
entry['boxes'] = np.append(entry['boxes'], boxes, axis=0)
entry['segms'].extend(valid_segms)
entry['gt_classes'] = np.append(entry['gt_classes'], gt_classes)
entry['seg_areas'] = np.append(entry['seg_areas'], seg_areas)
entry['gt_overlaps'] = np.append(
entry['gt_overlaps'].toarray(), gt_overlaps, axis=0
)
entry['gt_overlaps'] = scipy.sparse.csr_matrix(entry['gt_overlaps'])
entry['is_crowd'] = np.append(entry['is_crowd'], is_crowd)
entry['box_to_gt_ind_map'] = np.append(
entry['box_to_gt_ind_map'], box_to_gt_ind_map
)
if self.keypoints is not None:
entry['gt_keypoints'] = np.append(
entry['gt_keypoints'], gt_keypoints, axis=0
)
entry['has_visible_keypoints'] = im_has_visible_keypoints
entry['dataset_name'] = self.name
# add relationship annotations
im_rels = self.rel_anns[entry['file_name']]
sbj_gt_boxes = np.zeros((len(im_rels), 4), dtype=entry['sbj_gt_boxes'].dtype)
obj_gt_boxes = np.zeros((len(im_rels), 4), dtype=entry['obj_gt_boxes'].dtype)
sbj_gt_classes = np.zeros(len(im_rels), dtype=entry['sbj_gt_classes'].dtype)
obj_gt_classes = np.zeros(len(im_rels), dtype=entry['obj_gt_classes'].dtype)
prd_gt_classes = np.zeros(len(im_rels), dtype=entry['prd_gt_classes'].dtype)
for ix, rel in enumerate(im_rels):
# sbj
sbj_gt_box = box_utils_rel.y1y2x1x2_to_x1y1x2y2(rel['subject']['bbox'])
sbj_gt_boxes[ix] = sbj_gt_box
sbj_gt_classes[ix] = rel['subject']['category'] # excludes background
# obj
obj_gt_box = box_utils_rel.y1y2x1x2_to_x1y1x2y2(rel['object']['bbox'])
obj_gt_boxes[ix] = obj_gt_box
obj_gt_classes[ix] = rel['object']['category'] # excludes background
# prd
prd_gt_classes[ix] = rel['predicate'] # exclude background
entry['sbj_gt_boxes'] = np.append(entry['sbj_gt_boxes'], sbj_gt_boxes, axis=0)
entry['obj_gt_boxes'] = np.append(entry['obj_gt_boxes'], obj_gt_boxes, axis=0)
entry['sbj_gt_classes'] = np.append(entry['sbj_gt_classes'], sbj_gt_classes)
entry['obj_gt_classes'] = np.append(entry['obj_gt_classes'], obj_gt_classes)
entry['prd_gt_classes'] = np.append(entry['prd_gt_classes'], prd_gt_classes)
# misc
sbj_gt_overlaps = np.zeros(
(len(im_rels), self.num_obj_classes), dtype=entry['sbj_gt_overlaps'].dtype)
for ix in range(len(im_rels)):
sbj_cls = sbj_gt_classes[ix]
sbj_gt_overlaps[ix, sbj_cls] = 1.0
entry['sbj_gt_overlaps'] = np.append(
entry['sbj_gt_overlaps'].toarray(), sbj_gt_overlaps, axis=0)
entry['sbj_gt_overlaps'] = scipy.sparse.csr_matrix(entry['sbj_gt_overlaps'])
obj_gt_overlaps = np.zeros(
(len(im_rels), self.num_obj_classes), dtype=entry['obj_gt_overlaps'].dtype)
for ix in range(len(im_rels)):
obj_cls = obj_gt_classes[ix]
obj_gt_overlaps[ix, obj_cls] = 1.0
entry['obj_gt_overlaps'] = np.append(
entry['obj_gt_overlaps'].toarray(), obj_gt_overlaps, axis=0)
entry['obj_gt_overlaps'] = scipy.sparse.csr_matrix(entry['obj_gt_overlaps'])
prd_gt_overlaps = np.zeros(
(len(im_rels), self.num_prd_classes), dtype=entry['prd_gt_overlaps'].dtype)
pair_to_gt_ind_map = np.zeros(
(len(im_rels)), dtype=entry['pair_to_gt_ind_map'].dtype)
for ix in range(len(im_rels)):
prd_cls = prd_gt_classes[ix]
prd_gt_overlaps[ix, prd_cls] = 1.0
pair_to_gt_ind_map[ix] = ix
entry['prd_gt_overlaps'] = np.append(
entry['prd_gt_overlaps'].toarray(), prd_gt_overlaps, axis=0)
entry['prd_gt_overlaps'] = scipy.sparse.csr_matrix(entry['prd_gt_overlaps'])
entry['pair_to_gt_ind_map'] = np.append(
entry['pair_to_gt_ind_map'], pair_to_gt_ind_map)
for k in ['file_name']:
if k in entry:
del entry[k]
def _add_gt_from_cache(self, roidb, cache_filepath):
"""Add ground truth annotation metadata from cached file."""
logger.info('Loading cached gt_roidb from %s', cache_filepath)
with open(cache_filepath, 'rb') as fp:
cached_roidb = pickle.load(fp)
assert len(roidb) == len(cached_roidb)
for entry, cached_entry in zip(roidb, cached_roidb):
values = [cached_entry[key] for key in self.valid_cached_keys]
dataset_name, boxes, segms, gt_classes, seg_areas, gt_overlaps, is_crowd, box_to_gt_ind_map, \
sbj_gt_boxes, sbj_gt_classes, obj_gt_boxes, obj_gt_classes, prd_gt_classes, \
sbj_gt_overlaps, obj_gt_overlaps, prd_gt_overlaps, pair_to_gt_ind_map = values[:len(self.valid_cached_keys)]
if self.keypoints is not None:
gt_keypoints, has_visible_keypoints = values[len(self.valid_cached_keys):]
entry['dataset_name'] = dataset_name
entry['boxes'] = np.append(entry['boxes'], boxes, axis=0)
entry['segms'].extend(segms)
entry['gt_classes'] = np.append(entry['gt_classes'], gt_classes)
entry['seg_areas'] = np.append(entry['seg_areas'], seg_areas)
entry['gt_overlaps'] = scipy.sparse.csr_matrix(gt_overlaps)
entry['is_crowd'] = np.append(entry['is_crowd'], is_crowd)
entry['box_to_gt_ind_map'] = np.append(
entry['box_to_gt_ind_map'], box_to_gt_ind_map
)
if self.keypoints is not None:
entry['gt_keypoints'] = np.append(
entry['gt_keypoints'], gt_keypoints, axis=0
)
entry['has_visible_keypoints'] = has_visible_keypoints
# add relationship annotations
entry['sbj_gt_boxes'] = np.append(entry['sbj_gt_boxes'], sbj_gt_boxes, axis=0)
entry['sbj_gt_classes'] = np.append(entry['sbj_gt_classes'], sbj_gt_classes)
entry['sbj_gt_overlaps'] = scipy.sparse.csr_matrix(sbj_gt_overlaps)
entry['obj_gt_boxes'] = np.append(entry['obj_gt_boxes'], obj_gt_boxes, axis=0)
entry['obj_gt_classes'] = np.append(entry['obj_gt_classes'], obj_gt_classes)
entry['obj_gt_overlaps'] = scipy.sparse.csr_matrix(obj_gt_overlaps)
entry['prd_gt_classes'] = np.append(entry['prd_gt_classes'], prd_gt_classes)
entry['prd_gt_overlaps'] = scipy.sparse.csr_matrix(prd_gt_overlaps)
entry['pair_to_gt_ind_map'] = np.append(
entry['pair_to_gt_ind_map'], pair_to_gt_ind_map)
def _add_proposals_from_file(
self, roidb, proposal_file, min_proposal_size, top_k, crowd_thresh
):
"""Add proposals from a proposals file to an roidb."""
logger.info('Loading proposals from: {}'.format(proposal_file))
        with open(proposal_file, 'rb') as f:  # binary mode: the proposals file is a pickle
proposals = pickle.load(f)
id_field = 'indexes' if 'indexes' in proposals else 'ids' # compat fix
_sort_proposals(proposals, id_field)
box_list = []
for i, entry in enumerate(roidb):
if i % 2500 == 0:
logger.info(' {:d}/{:d}'.format(i + 1, len(roidb)))
boxes = proposals['boxes'][i]
# Sanity check that these boxes are for the correct image id
assert entry['id'] == proposals[id_field][i]
# Remove duplicate boxes and very small boxes and then take top k
boxes = box_utils.clip_boxes_to_image(
boxes, entry['height'], entry['width']
)
keep = box_utils.unique_boxes(boxes)
boxes = boxes[keep, :]
keep = box_utils.filter_small_boxes(boxes, min_proposal_size)
boxes = boxes[keep, :]
if top_k > 0:
boxes = boxes[:top_k, :]
box_list.append(boxes)
_merge_proposal_boxes_into_roidb(roidb, box_list)
if crowd_thresh > 0:
_filter_crowd_proposals(roidb, crowd_thresh)
def _init_keypoints(self):
"""Initialize COCO keypoint information."""
self.keypoints = None
self.keypoint_flip_map = None
self.keypoints_to_id_map = None
self.num_keypoints = 0
# Thus far only the 'person' category has keypoints
if 'person' in self.category_to_id_map:
cat_info = self.COCO.loadCats([self.category_to_id_map['person']])
else:
return
# Check if the annotations contain keypoint data or not
if 'keypoints' in cat_info[0]:
keypoints = cat_info[0]['keypoints']
self.keypoints_to_id_map = dict(
zip(keypoints, range(len(keypoints))))
self.keypoints = keypoints
self.num_keypoints = len(keypoints)
if cfg.KRCNN.NUM_KEYPOINTS != -1:
assert cfg.KRCNN.NUM_KEYPOINTS == self.num_keypoints, \
"number of keypoints should equal when using multiple datasets"
else:
cfg.KRCNN.NUM_KEYPOINTS = self.num_keypoints
self.keypoint_flip_map = {
'left_eye': 'right_eye',
'left_ear': 'right_ear',
'left_shoulder': 'right_shoulder',
'left_elbow': 'right_elbow',
'left_wrist': 'right_wrist',
'left_hip': 'right_hip',
'left_knee': 'right_knee',
'left_ankle': 'right_ankle'}
def _get_gt_keypoints(self, obj):
"""Return ground truth keypoints."""
if 'keypoints' not in obj:
return None
kp = np.array(obj['keypoints'])
x = kp[0::3] # 0-indexed x coordinates
y = kp[1::3] # 0-indexed y coordinates
# 0: not labeled; 1: labeled, not inside mask;
# 2: labeled and inside mask
v = kp[2::3]
        num_keypoints = len(obj['keypoints']) // 3  # integer division: keypoints are (x, y, v) triples
assert num_keypoints == self.num_keypoints
gt_kps = np.ones((3, self.num_keypoints), dtype=np.int32)
for i in range(self.num_keypoints):
gt_kps[0, i] = x[i]
gt_kps[1, i] = y[i]
gt_kps[2, i] = v[i]
return gt_kps
def add_rel_proposals(roidb, sbj_rois, obj_rois, det_rois, scales):
"""Add proposal boxes (rois) to an roidb that has ground-truth annotations
but no proposals. If the proposals are not at the original image scale,
specify the scale factor that separate them in scales.
"""
assert (sbj_rois[:, 0] == obj_rois[:, 0]).all()
sbj_box_list = []
obj_box_list = []
for i, entry in enumerate(roidb):
inv_im_scale = 1. / scales[i]
idx = np.where(sbj_rois[:, 0] == i)[0]
# include pairs where at least one box is gt
det_idx = np.where(det_rois[:, 0] == i)[0]
im_det_boxes = det_rois[det_idx, 1:] * inv_im_scale
sbj_gt_boxes = entry['sbj_gt_boxes']
obj_gt_boxes = entry['obj_gt_boxes']
unique_sbj_gt_boxes = np.unique(sbj_gt_boxes, axis=0)
unique_obj_gt_boxes = np.unique(obj_gt_boxes, axis=0)
# sbj_gt w/ obj_det
sbj_gt_boxes_paired_w_det = np.repeat(unique_sbj_gt_boxes, im_det_boxes.shape[0], axis=0)
obj_det_boxes_paired_w_gt = np.tile(im_det_boxes, (unique_sbj_gt_boxes.shape[0], 1))
# sbj_det w/ obj_gt
sbj_det_boxes_paired_w_gt = np.repeat(im_det_boxes, unique_obj_gt_boxes.shape[0], axis=0)
obj_gt_boxes_paired_w_det = np.tile(unique_obj_gt_boxes, (im_det_boxes.shape[0], 1))
# sbj_gt w/ obj_gt
sbj_gt_boxes_paired_w_gt = np.repeat(unique_sbj_gt_boxes, unique_obj_gt_boxes.shape[0], axis=0)
obj_gt_boxes_paired_w_gt = np.tile(unique_obj_gt_boxes, (unique_sbj_gt_boxes.shape[0], 1))
# now concatenate them all
sbj_box_list.append(np.concatenate(
(sbj_rois[idx, 1:] * inv_im_scale, sbj_gt_boxes_paired_w_det, sbj_det_boxes_paired_w_gt, sbj_gt_boxes_paired_w_gt)))
obj_box_list.append(np.concatenate(
(obj_rois[idx, 1:] * inv_im_scale, obj_det_boxes_paired_w_gt, obj_gt_boxes_paired_w_det, obj_gt_boxes_paired_w_gt)))
_merge_paired_boxes_into_roidb(roidb, sbj_box_list, obj_box_list)
_add_prd_class_assignments(roidb)
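# Worked example (not part of the original file): the gt/det pairing above is a
# cartesian product built with np.repeat and np.tile. For 2 unique subject GT boxes
# and 3 detected boxes, np.repeat(sbj_gt, 3, axis=0) yields rows
# [g0, g0, g0, g1, g1, g1] and np.tile(det, (2, 1)) yields [d0, d1, d2, d0, d1, d2],
# so row i of the two arrays forms one (subject, object) candidate pair.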
def _merge_paired_boxes_into_roidb(roidb, sbj_box_list, obj_box_list):
assert len(sbj_box_list) == len(obj_box_list) == len(roidb)
for i, entry in enumerate(roidb):
sbj_boxes = sbj_box_list[i]
obj_boxes = obj_box_list[i]
assert sbj_boxes.shape[0] == obj_boxes.shape[0]
num_pairs = sbj_boxes.shape[0]
sbj_gt_overlaps = np.zeros(
(num_pairs, entry['sbj_gt_overlaps'].shape[1]),
dtype=entry['sbj_gt_overlaps'].dtype
)
obj_gt_overlaps = np.zeros(
(num_pairs, entry['obj_gt_overlaps'].shape[1]),
dtype=entry['obj_gt_overlaps'].dtype
)
prd_gt_overlaps = np.zeros(
(num_pairs, entry['prd_gt_overlaps'].shape[1]),
dtype=entry['prd_gt_overlaps'].dtype
)
pair_to_gt_ind_map = -np.ones(
(num_pairs), dtype=entry['pair_to_gt_ind_map'].dtype
)
pair_gt_inds = np.arange(entry['prd_gt_classes'].shape[0])
if len(pair_gt_inds) > 0:
sbj_gt_boxes = entry['sbj_gt_boxes'][pair_gt_inds, :]
sbj_gt_classes = entry['sbj_gt_classes'][pair_gt_inds]
obj_gt_boxes = entry['obj_gt_boxes'][pair_gt_inds, :]
obj_gt_classes = entry['obj_gt_classes'][pair_gt_inds]
prd_gt_classes = entry['prd_gt_classes'][pair_gt_inds]
sbj_to_gt_overlaps = box_utils.bbox_overlaps(
sbj_boxes.astype(dtype=np.float32, copy=False),
sbj_gt_boxes.astype(dtype=np.float32, copy=False)
)
obj_to_gt_overlaps = box_utils.bbox_overlaps(
obj_boxes.astype(dtype=np.float32, copy=False),
obj_gt_boxes.astype(dtype=np.float32, copy=False)
)
pair_to_gt_overlaps = np.minimum(sbj_to_gt_overlaps, obj_to_gt_overlaps)
# Gt box that overlaps each input box the most
# (ties are broken arbitrarily by class order)
sbj_argmaxes = sbj_to_gt_overlaps.argmax(axis=1)
sbj_maxes = sbj_to_gt_overlaps.max(axis=1) # Amount of that overlap
sbj_I = np.where(sbj_maxes >= 0)[0] # Those boxes with non-zero overlap with gt boxes, get all items
obj_argmaxes = obj_to_gt_overlaps.argmax(axis=1)
obj_maxes = obj_to_gt_overlaps.max(axis=1) # Amount of that overlap
obj_I = np.where(obj_maxes >= 0)[0] # Those boxes with non-zero overlap with gt boxes, get all items
pair_argmaxes = pair_to_gt_overlaps.argmax(axis=1)
pair_maxes = pair_to_gt_overlaps.max(axis=1) # Amount of that overlap
pair_I = np.where(pair_maxes >= 0)[0] # Those boxes with non-zero overlap with gt boxes, get all items
# Record max overlaps with the class of the appropriate gt box
sbj_gt_overlaps[sbj_I, sbj_gt_classes[sbj_argmaxes[sbj_I]]] = sbj_maxes[sbj_I]
obj_gt_overlaps[obj_I, obj_gt_classes[obj_argmaxes[obj_I]]] = obj_maxes[obj_I]
prd_gt_overlaps[pair_I, prd_gt_classes[pair_argmaxes[pair_I]]] = pair_maxes[pair_I]
pair_to_gt_ind_map[pair_I] = pair_gt_inds[pair_argmaxes[pair_I]]
entry['sbj_boxes'] = sbj_boxes.astype(entry['sbj_gt_boxes'].dtype, copy=False)
entry['sbj_gt_overlaps'] = sbj_gt_overlaps
entry['sbj_gt_overlaps'] = scipy.sparse.csr_matrix(entry['sbj_gt_overlaps'])
entry['obj_boxes'] = obj_boxes.astype(entry['obj_gt_boxes'].dtype, copy=False)
entry['obj_gt_overlaps'] = obj_gt_overlaps
entry['obj_gt_overlaps'] = scipy.sparse.csr_matrix(entry['obj_gt_overlaps'])
entry['prd_gt_classes'] = -np.ones((num_pairs), dtype=entry['prd_gt_classes'].dtype)
entry['prd_gt_overlaps'] = prd_gt_overlaps
entry['prd_gt_overlaps'] = scipy.sparse.csr_matrix(entry['prd_gt_overlaps'])
entry['pair_to_gt_ind_map'] = pair_to_gt_ind_map.astype(
entry['pair_to_gt_ind_map'].dtype, copy=False)
def _add_prd_class_assignments(roidb):
"""Compute object category assignment for each box associated with each
roidb entry.
"""
for entry in roidb:
sbj_gt_overlaps = entry['sbj_gt_overlaps'].toarray()
max_sbj_overlaps = sbj_gt_overlaps.max(axis=1)
max_sbj_classes = sbj_gt_overlaps.argmax(axis=1)
entry['max_sbj_classes'] = max_sbj_classes
entry['max_sbj_overlaps'] = max_sbj_overlaps
obj_gt_overlaps = entry['obj_gt_overlaps'].toarray()
max_obj_overlaps = obj_gt_overlaps.max(axis=1)
max_obj_classes = obj_gt_overlaps.argmax(axis=1)
entry['max_obj_classes'] = max_obj_classes
entry['max_obj_overlaps'] = max_obj_overlaps
prd_gt_overlaps = entry['prd_gt_overlaps'].toarray()
max_pair_overlaps = prd_gt_overlaps.max(axis=1)
max_prd_classes = prd_gt_overlaps.argmax(axis=1)
entry['max_prd_classes'] = max_prd_classes
entry['max_pair_overlaps'] = max_pair_overlaps
# sanity checks
# if max overlap is 0, the class must be background (class 0)
# zero_inds = np.where(max_pair_overlaps == 0)[0]
# assert all(max_prd_classes[zero_inds] == 0)
# # if max overlap > 0, the class must be a fg class (not class 0)
# nonzero_inds = np.where(max_pair_overlaps > 0)[0]
# assert all(max_prd_classes[nonzero_inds] != 0)
def add_proposals(roidb, rois, scales, crowd_thresh):
"""Add proposal boxes (rois) to an roidb that has ground-truth annotations
but no proposals. If the proposals are not at the original image scale,
    specify the per-image scale factors in `scales` so the boxes can be rescaled back.
"""
box_list = []
for i in range(len(roidb)):
inv_im_scale = 1. / scales[i]
idx = np.where(rois[:, 0] == i)[0]
box_list.append(rois[idx, 1:] * inv_im_scale)
_merge_proposal_boxes_into_roidb(roidb, box_list)
if crowd_thresh > 0:
_filter_crowd_proposals(roidb, crowd_thresh)
_add_class_assignments(roidb)
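# Illustrative sketch (not part of the original file): `rois` is expected to be an
# (N, 5) array whose first column is the image index into `roidb` and whose last
# four columns are box coordinates at the scaled-image resolution, e.g.:
#
#   rois = np.array([[0, 10., 10., 50., 50.],
#                    [1, 20., 30., 80., 90.]], dtype=np.float32)
#   add_proposals(roidb, rois, scales=[2.0, 2.0], crowd_thresh=0.7)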
def _merge_proposal_boxes_into_roidb(roidb, box_list):
"""Add proposal boxes to each roidb entry."""
assert len(box_list) == len(roidb)
for i, entry in enumerate(roidb):
boxes = box_list[i]
num_boxes = boxes.shape[0]
gt_overlaps = np.zeros(
(num_boxes, entry['gt_overlaps'].shape[1]),
dtype=entry['gt_overlaps'].dtype
)
box_to_gt_ind_map = -np.ones(
(num_boxes), dtype=entry['box_to_gt_ind_map'].dtype
)
# Note: unlike in other places, here we intentionally include all gt
# rois, even ones marked as crowd. Boxes that overlap with crowds will
# be filtered out later (see: _filter_crowd_proposals).
gt_inds = np.where(entry['gt_classes'] > 0)[0]
if len(gt_inds) > 0:
gt_boxes = entry['boxes'][gt_inds, :]
gt_classes = entry['gt_classes'][gt_inds]
proposal_to_gt_overlaps = box_utils.bbox_overlaps(
boxes.astype(dtype=np.float32, copy=False),
gt_boxes.astype(dtype=np.float32, copy=False)
)
# Gt box that overlaps each input box the most
# (ties are broken arbitrarily by class order)
argmaxes = proposal_to_gt_overlaps.argmax(axis=1)
# Amount of that overlap
maxes = proposal_to_gt_overlaps.max(axis=1)
# Those boxes with non-zero overlap with gt boxes
I = np.where(maxes > 0)[0]
# Record max overlaps with the class of the appropriate gt box
gt_overlaps[I, gt_classes[argmaxes[I]]] = maxes[I]
box_to_gt_ind_map[I] = gt_inds[argmaxes[I]]
entry['boxes'] = np.append(
entry['boxes'],
boxes.astype(entry['boxes'].dtype, copy=False),
axis=0
)
entry['gt_classes'] = np.append(
entry['gt_classes'],
np.zeros((num_boxes), dtype=entry['gt_classes'].dtype)
)
entry['seg_areas'] = np.append(
entry['seg_areas'],
np.zeros((num_boxes), dtype=entry['seg_areas'].dtype)
)
entry['gt_overlaps'] = np.append(
entry['gt_overlaps'].toarray(), gt_overlaps, axis=0
)
entry['gt_overlaps'] = scipy.sparse.csr_matrix(entry['gt_overlaps'])
entry['is_crowd'] = np.append(
entry['is_crowd'],
np.zeros((num_boxes), dtype=entry['is_crowd'].dtype)
)
entry['box_to_gt_ind_map'] = np.append(
entry['box_to_gt_ind_map'],
box_to_gt_ind_map.astype(
entry['box_to_gt_ind_map'].dtype, copy=False
)
)
def _filter_crowd_proposals(roidb, crowd_thresh):
"""Finds proposals that are inside crowd regions and marks them as
overlap = -1 with each ground-truth rois, which means they will be excluded
from training.
"""
for entry in roidb:
gt_overlaps = entry['gt_overlaps'].toarray()
crowd_inds = np.where(entry['is_crowd'] == 1)[0]
non_gt_inds = np.where(entry['gt_classes'] == 0)[0]
if len(crowd_inds) == 0 or len(non_gt_inds) == 0:
continue
crowd_boxes = box_utils.xyxy_to_xywh(entry['boxes'][crowd_inds, :])
non_gt_boxes = box_utils.xyxy_to_xywh(entry['boxes'][non_gt_inds, :])
iscrowd_flags = [int(True)] * len(crowd_inds)
ious = COCOmask.iou(non_gt_boxes, crowd_boxes, iscrowd_flags)
bad_inds = np.where(ious.max(axis=1) > crowd_thresh)[0]
gt_overlaps[non_gt_inds[bad_inds], :] = -1
entry['gt_overlaps'] = scipy.sparse.csr_matrix(gt_overlaps)
def _add_class_assignments(roidb):
"""Compute object category assignment for each box associated with each
roidb entry.
"""
for entry in roidb:
gt_overlaps = entry['gt_overlaps'].toarray()
# max overlap with gt over classes (columns)
max_overlaps = gt_overlaps.max(axis=1)
# gt class that had the max overlap
max_classes = gt_overlaps.argmax(axis=1)
entry['max_classes'] = max_classes
entry['max_overlaps'] = max_overlaps
# sanity checks
# if max overlap is 0, the class must be background (class 0)
zero_inds = np.where(max_overlaps == 0)[0]
assert all(max_classes[zero_inds] == 0)
# if max overlap > 0, the class must be a fg class (not class 0)
nonzero_inds = np.where(max_overlaps > 0)[0]
assert all(max_classes[nonzero_inds] != 0)
def _sort_proposals(proposals, id_field):
"""Sort proposals by the specified id field."""
order = np.argsort(proposals[id_field])
fields_to_sort = ['boxes', id_field, 'scores']
for k in fields_to_sort:
proposals[k] = [proposals[k][i] for i in order]
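# Illustrative sketch (not part of the original file): `proposals` is assumed to
# be a dict of parallel lists keyed by 'boxes', 'scores', and an id field, e.g.:
#
#   proposals = {'ids': [3, 1, 2], 'boxes': [b3, b1, b2], 'scores': [s3, s1, s2]}
#   _sort_proposals(proposals, 'ids')
#   # afterwards all three lists are reordered so that proposals['ids'] == [1, 2, 3]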
| ContrastiveLosses4VRD-master | lib/datasets_rel/json_dataset_rel.py |
#! /usr/bin/python3
import sys,os,json,requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
# Input hosts file, read line by line
file = sys.argv[1]
f = open(file,'r')
lines = f.readlines()
# Iterate over each host to get the Redfish details
for item in lines:
if item == '[master]\n' or item == '[nodes]\n':
# Skip Ansible inventory section headers and just print a blank separator
print(' ')
else:
bmc = item.strip().split(' ')
if len(bmc) > 6:
host = bmc[6]
user = bmc[7]
pas = bmc[8]
host1 = host.strip().split('=')
user1 = user.strip().split('=')
pas1 = pas.strip().split('=')
print('\n' + '*' * 70)
# Check whether the host exposes the Redfish API
try:
url = "https://{}/redfish/v1/".format(host1[1])
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
response = requests.get(url, verify=False,timeout=3)
# Query BIOS details from hosts that answered the Redfish root request
try:
oe = response.json()['Oem']
# Iterate over the OEM partners to get the server details
for item in oe:
if item == 'Supermicro':
cc = "https://{}/redfish/v1/Systems/1/Bios".format(host1[1])
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
output = requests.get(cc,verify=False,auth=(user1[1], pas1[1]))
sev_snp = output.json()['Attributes']['SEV-SNPSupport#01A7']
smee = output.json()['Attributes']['SMEE#003E']
iommu = output.json()['Attributes']['IOMMU#0196']
sev_asid = output.json()['Attributes']['SEV-ESASIDSpaceLimit#700C']
snp_memory = output.json()['Attributes']['SNPMemory(RMPTable)Coverage#003C']
print("SEV SNP Support: {}".format(sev_snp))
print("SMEE Status: {}".format(smee))
print("IOMMU Status: {}".format(iommu))
print("SEV-ES ASID Space Limit: {}".format(sev_asid))
print("SNP Memory Coverage: {}".format(snp_memory))
print('*' * 70)
if sev_snp == 'Enabled' and smee == 'Enabled' and iommu == 'Enabled' and snp_memory == 'Enabled' and sev_asid == 100:
print("BIOS configured for Cloud Computing")
else:
print("BIOS not configured for Cloud Computing")
elif item == 'Ami':
cc = "https://{}/redfish/v1/Systems/Self/Bios".format(host1[1])
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
output = requests.get(cc,verify=False,auth=(user1[1], pas1[1]))
sev_asid_count = output.json()['Attributes']['CbsCmnCpuSevAsidCount']
sev_asid_limit = output.json()['Attributes']['CbsCmnCpuSevAsidSpaceCtrl']
sev_asid_space = output.json()['Attributes']['CbsCmnCpuSevAsidSpaceLimit']
snp_memory = output.json()['Attributes']['CbsDbgCpuSnpMemCover']
smee = output.json()['Attributes']['CbsCmnCpuSmee']
snp_support = output.json()['Attributes']['CbsSevSnpSupport']
print("SEV SNP Support: {}".format(snp_support))
print("SEV ASID Count: {}".format(sev_asid_count))
print("SEV ASID Space Limit Control: {}".format(sev_asid_limit))
print("SEV-ES ASID Space Limit: {}".format(sev_asid_space))
print("SNP Memory Coverage: {}".format(snp_memory))
print("SMEE Status: {}".format(smee))
print(" ")
print('*' * 70)
if snp_support == 'CbsSevSnpSupportEnable' and smee == 'CbsCmnCpuSmeeEnabled' and snp_memory == 'CbsDbgCpuSnpMemCoverEnabled' and sev_asid_limit == 'CbsCmnCpuSevAsidSpaceCtrlManual' and sev_asid_count == 'CbsCmnCpuSevAsidCount509ASIDs' and sev_asid_space == 100:
print("BIOS configured for Cloud Computing")
else:
print("BIOS not configured for Cloud Computing")
else:
print('Not a supported system; expected either an ASRockRack or a Supermicro system')
except Exception as e:
if 'Oem' in str(e):
atosurl = "https://{}/redfish/v1".format(host)
atos = requests.get(atosurl,verify=False)
print('Atos Server {} UUI is: '.format(host) + atos.json()['UUID'])
except:
print('{} server does not support the Redfish API'.format(host1[1]))
else:
print("Please update BMC IP, Username and Password details in hosts file like \n'localhost ansible_ssh_user=nvidia ansible_ssh_pass=nvidia ansible_sudo_pass=nvidia ansible_ssh_common_args='-o StrictHostKeyChecking=no' bmc_ip=<bmc-IP> bmc_username=root bmc_password=nvidia123'") | cloud-native-stack-master | playbooks/files/redfish.py |
#!/usr/bin/env python3
import sys
import json
import requests
inFileName = "../../README.md"
outFileName = "../../docs/index.html"
htmlBeforeBody = '''
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml" lang="" xml:lang="">
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>NVTX - NVIDIA Tools Extension Library</title>
<link rel="stylesheet" href="github-markdown.css">
<style>
.markdown-body {
box-sizing: border-box;
min-width: 200px;
max-width: 980px;
margin: 0 auto;
padding: 45px;
}
@media (max-width: 767px) {
.markdown-body {
padding: 15px;
}
}
</style>
</head>
<body class="markdown-body" style="background-color: var(--color-canvas-default)">
'''
htmlAfterBody = '''
</body>
</html>
'''
try:
with open(inFileName, "r+") as inFile:
markdownText = inFile.read()
except IOError:
print("Failed to open input file ", inFileName)
sys.exit(1)
# Replace relative intra-repo links with full URLs, assuming release-v3 branch
repoBaseUrl = "https://github.com/NVIDIA/NVTX/tree/release-v3"
markdownText = markdownText.replace("(/c)", "(" + repoBaseUrl + "/c)")
markdownText = markdownText.replace("(/python)", "(" + repoBaseUrl + "/python)")
# Github replaces image links to external sites in README files with mangled things
# for "security". This means README.md cannot directly link to docs/images using
# github.io links (which it treats as external) without getting them mangled. The solution
# is to instead have READMEs on github link to raw.githubusercontent.com. Replace
# those links here with local links, so README.md converted to index.html for
# github.io will get the local images from github.io's hosting.
rawContextBaseUrl = "https://raw.githubusercontent.com/NVIDIA/NVTX/release-v3/docs/"
markdownText = markdownText.replace(rawContextBaseUrl, "")
# Replace mentions of "this repo", which makes sense in the GitHub repo's markdown files,
# with a name that makes more sense in docs hosted outside the GitHub repo.
markdownText = markdownText.replace("this repo", "the NVIDIA NVTX GitHub repo")
url = "https://api.github.com/markdown"
postData = {"text": markdownText, "mode": "markdown"}
try:
response = requests.post(url, json = postData)
response.raise_for_status()
except Exception as ex:
print("Failure in API call to GitHub to convert markdown to html:", ex)
sys.exit(1)
html = htmlBeforeBody + response.text + htmlAfterBody
try:
with open(outFileName, "w") as outFile:
outFile.write(html)
except IOError:
print("Failed to open output file ", outFileName)
sys.exit(1)
print(f'Successfully generated "{outFileName}" from "{inFileName}".') | NVTX-release-v3 | tools/docs/generate-readme.py |
# Copyright 2020-2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
import os
import glob
import sysconfig
from distutils.sysconfig import get_python_lib
from Cython.Build import cythonize
from setuptools import find_packages, setup
from setuptools.extension import Extension
cython_files = ["nvtx/**/*.pyx"]
try:
nthreads = int(os.environ.get("PARALLEL_LEVEL", "0") or "0")
except Exception:
nthreads = 0
include_dirs = [os.path.dirname(sysconfig.get_path("include")),]
library_dirs = [get_python_lib()]
extensions = [
Extension(
"*",
sources=cython_files,
include_dirs=include_dirs,
library_dirs=library_dirs,
language="c",
)
]
cython_tests = glob.glob("nvtx/_lib/tests/*.pyx")
# tests:
extensions += cythonize(
[
Extension(
"*",
sources=cython_tests,
include_dirs=include_dirs,
library_dirs=library_dirs,
language="c"
)
],
nthreads=nthreads,
compiler_directives=dict(
profile=True, language_level=3, embedsignature=True, binding=True
),
)
setup(
name="nvtx",
version="0.2.5",
description="PyNVTX - Python code annotation library",
url="https://github.com/NVIDIA/nvtx",
author="NVIDIA Corporation",
license="Apache 2.0",
classifiers=[
"Intended Audience :: Developers",
"Topic :: Database",
"Topic :: Scientific/Engineering",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
],
# Include the separately-compiled shared library
ext_modules=cythonize(
extensions,
nthreads=nthreads,
compiler_directives=dict(
profile=False, language_level=3, embedsignature=True
),
),
packages=find_packages(include=["nvtx", "nvtx.*"]),
package_data=dict.fromkeys(
find_packages(include=["nvtx._lib*"]), ["*.pxd"],
),
license_files=["LICENSE.txt"],
zip_safe=False,
)
| NVTX-release-v3 | python/setup.py |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = "nvtx"
copyright = "2020-2022, NVIDIA Corporation"
author = "NVIDIA Corporation"
# The full version, including alpha/beta/rc tags
release = "0.2.5"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.napoleon"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
| NVTX-release-v3 | python/docs/conf.py |
# Copyright 2020-2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
from nvtx.nvtx import (
annotate,
enabled,
pop_range,
push_range,
start_range,
end_range,
mark
)
from nvtx._lib.profiler import Profile
| NVTX-release-v3 | python/nvtx/__init__.py |
# Copyright 2020-2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
import contextlib
import os
from functools import wraps
from nvtx._lib import (
Domain,
EventAttributes,
mark as libnvtx_mark,
pop_range as libnvtx_pop_range,
push_range as libnvtx_push_range,
start_range as libnvtx_start_range,
end_range as libnvtx_end_range
)
_ENABLED = not os.getenv("NVTX_DISABLE", False)
class annotate:
"""
Annotate code ranges using a context manager or a decorator.
"""
def __init__(self, message=None, color=None, domain=None, category=None):
"""
Annotate a function or a code range.
Parameters
----------
message : str, optional
A message associated with the annotated code range.
When used as a decorator, the default value of message
is the name of the function being decorated.
When used as a context manager, the default value
is the empty string.
color : str or color, optional
A color associated with the annotated code range.
Supports `matplotlib` colors if it is available.
domain : str, optional
A string specifying the domain under which the code range is
scoped. The default domain is named "NVTX".
category : str, int, optional
A string or an integer specifying the category within the domain
under which the code range is scoped. If unspecified, the code
range is not associated with a category.
Examples
--------
>>> import nvtx
>>> import time
Using a decorator:
>>> @nvtx.annotate("my_func", color="red", domain="my_domain")
... def func():
... time.sleep(0.1)
Using a context manager:
>>> with nvtx.annotate("my_code_range", color="blue"):
... time.sleep(10)
...
"""
self.domain = Domain(domain)
category_id = None
if isinstance(category, int):
category_id = category
elif isinstance(category, str):
category_id = self.domain.get_category_id(category)
self.attributes = EventAttributes(message, color, category_id)
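# Added explanatory note: __reduce__ below lets annotate objects be pickled
# (see test_pickle_annotate in the test suite), presumably so decorated functions
# can be shipped to worker processes; only the picklable pieces (message, color,
# domain name) are kept and the Domain is rebuilt when the object is restored.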
def __reduce__(self):
return (
self.__class__,
(self.attributes.message, self.attributes.color, self.domain.name),
)
def __enter__(self):
libnvtx_push_range(self.attributes, self.domain.handle)
return self
def __exit__(self, *exc):
libnvtx_pop_range(self.domain.handle)
return False
def __call__(self, func):
if not self.attributes.message:
self.attributes.message = func.__name__
@wraps(func)
def inner(*args, **kwargs):
libnvtx_push_range(self.attributes, self.domain.handle)
result = func(*args, **kwargs)
libnvtx_pop_range(self.domain.handle)
return result
return inner
def mark(message=None, color="blue", domain=None, category=None):
"""
Mark an instantaneous event.
Parameters
----------
message : str
A message associated with the event.
color : str, color, optional
Color associated with the event.
domain : str, optional
A string specifying the domain under which the event is scoped.
The default domain is named "NVTX".
category : str, int, optional
A string or an integer specifying the category within the domain
under which the event is scoped. If unspecified, the event is
not associated with a category.
"""
domain = Domain(domain)
category_id = None
if isinstance(category, int):
category_id = category
elif isinstance(category, str):
category_id = domain.get_category_id(category)
attributes = EventAttributes(message, color, category_id)
libnvtx_mark(attributes, domain.handle)
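# Usage sketch (added for illustration, mirroring the Examples sections of the
# range functions below): emit a single instantaneous event.
#
#   >>> import nvtx
#   >>> nvtx.mark("checkpoint reached", color="red", domain="my_domain")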
def push_range(message=None, color="blue", domain=None, category=None):
"""
Mark the beginning of a code range.
Parameters
----------
message : str, optional
A message associated with the annotated code range.
color : str, color, optional
A color associated with the annotated code range.
Supports `matplotlib` colors if it is available.
domain : str, optional
Name of a domain under which the code range is scoped.
The default domain name is "NVTX".
category : str, int, optional
A string or an integer specifying the category within the domain
under which the code range is scoped. If unspecified, the code range
is not associated with a category.
Examples
--------
>>> import time
>>> import nvtx
>>> nvtx.push_range("my_code_range", domain="my_domain")
>>> time.sleep(1)
>>> nvtx.pop_range(domain="my_domain")
"""
domain = Domain(domain)
category_id = None
if isinstance(category, int):
category_id = category
elif isinstance(category, str):
category_id = domain.get_category_id(category)
libnvtx_push_range(EventAttributes(message, color, category_id), domain.handle)
def pop_range(domain=None):
"""
Mark the end of a code range that was started with `push_range`.
Parameters
----------
domain : str, optional
The domain under which the code range is scoped. The default
domain is "NVTX".
"""
libnvtx_pop_range(Domain(domain).handle)
def start_range(message=None, color="blue", domain=None, category=None):
"""
Mark the beginning of a code range.
Parameters
----------
message : str, optional
A message associated with the annotated code range.
color : str, color, optional
A color associated with the annotated code range.
Supports `matplotlib` colors if it is available.
domain : str, optional
Name of a domain under which the code range is scoped.
The default domain name is "NVTX".
category : str, int, optional
A string or an integer specifying the category within the domain
under which the code range is scoped. If unspecified, the code range
is not associated with a category.
Returns
-------
An object of type `RangeId` that must be passed to the `end_range()` function.
Examples
--------
>>> import time
>>> import nvtx
>>> range_id = nvtx.start_range("my_code_range", domain="my_domain")
>>> time.sleep(1)
>>> nvtx.end_range(range_id, domain="my_domain")
"""
domain = Domain(domain)
category_id = None
if isinstance(category, int):
category_id = category
elif isinstance(category, str):
category_id = domain.get_category_id(category)
marker_id = libnvtx_start_range(
EventAttributes(message, color, category_id), domain.handle
)
return marker_id
def end_range(range_id):
"""
Mark the end of a code range that was started with `start_range`.
Parameters
----------
range_id : RangeId
The `RangeId` object returned by the `start_range` function.
"""
libnvtx_end_range(range_id)
def enabled():
"""
Returns True if nvtx is enabled.
"""
return _ENABLED
if not enabled():
class annotate(contextlib.nullcontext):
def __init__(self, *args, **kwargs):
pass
def __call__(self, func):
return func
# Could use a decorator here but overheads are significant enough
# not to. See https://github.com/NVIDIA/NVTX/pull/24 for discussion.
def mark(message=None, color=None, domain=None, category=None): pass
def push_range(message=None, color=None, domain=None, category=None): pass
def pop_range(domain=None): pass
def start_range(message=None, color=None, domain=None, category=None): pass
def end_range(range_id): pass
| NVTX-release-v3 | python/nvtx/nvtx.py |
import sys
from runpy import run_path
from optparse import OptionParser
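# Typical invocation (assuming the package is executed as a module):
#   python -m nvtx --no-linenos --annotate-cfuncs my_script.py arg1 arg2
# The options are consumed here; the script path and its arguments become
# sys.argv for the profiled script.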
def main():
from nvtx import Profile
usage = "%prog [options] scriptfile [args] ..."
parser = OptionParser(usage)
parser.add_option(
"--linenos",
action="store_true",
dest="linenos",
default=True,
help="Include file and line number information in annotations. Otherwise, "
"only the function name is used."
)
parser.add_option(
"--no-linenos",
action="store_false",
dest="linenos",
default=True,
help="Do not include file and line number information in annotations."
)
parser.add_option(
"--annotate-cfuncs",
action="store_true",
dest="annotate_cfuncs",
default=False,
help="Also annotate C-extension and builtin functions. [default: %default]",
)
options, args = parser.parse_args()
script_file = args[0]
sys.argv = args
profiler = Profile(
linenos=options.linenos,
annotate_cfuncs=options.annotate_cfuncs
)
profiler.enable()
try:
run_path(script_file)
finally:
profiler.disable()
if __name__ == "__main__":
main()
| NVTX-release-v3 | python/nvtx/__main__.py |
# Copyright 2020-2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
import functools
_NVTX_COLORS = {
None: 0x0000FF, # blue
"green": 0x008000,
"blue": 0x0000FF,
"yellow": 0xFFFF00,
"purple": 0x800080,
"rapids": 0x7400FF,
"cyan": 0x00FFFF,
"red": 0xFF0000,
"white": 0xFFFFFF,
"darkgreen": 0x006400,
"orange": 0xFFA500,
}
@functools.lru_cache()
def color_to_hex(color=None):
"""
Convert color to ARGB hex value.
"""
if isinstance(color, int):
return color
if color in _NVTX_COLORS:
return _NVTX_COLORS[color]
try:
import matplotlib.colors
except ImportError as e:
raise TypeError(
f"Invalid color {color}. Please install matplotlib "
"for additional colors support"
) from e
rgba = matplotlib.colors.to_rgba(color)
argb = (rgba[-1], rgba[0], rgba[1], rgba[2])
return int(matplotlib.colors.to_hex(argb, keep_alpha=True)[1:], 16)
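# Usage sketch (illustrative only; values follow the definitions above):
#   color_to_hex("green")   -> 0x008000
#   color_to_hex(None)      -> 0x0000FF (default blue)
#   color_to_hex(0x123456)  -> 0x123456 (integers pass through unchanged)
#   color_to_hex("salmon")  -> resolved via matplotlib, if it is installed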
| NVTX-release-v3 | python/nvtx/colors.py |
# Copyright 2020-2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
import pickle
import pytest
import nvtx
@pytest.mark.parametrize(
"message",
[
None,
"",
"x",
"abc",
"abc def"
]
)
@pytest.mark.parametrize(
"color",
[
None,
"red",
"green",
"blue"
]
)
@pytest.mark.parametrize(
"domain",
[
None,
"",
"x",
"abc",
"abc def"
]
)
def test_annotate_context_manager(message, color, domain):
with nvtx.annotate(message=message, color=color, domain=domain):
pass
@pytest.mark.parametrize(
"message",
[
None,
"",
"x",
"abc",
"abc def"
]
)
@pytest.mark.parametrize(
"color",
[
None,
"red",
"green",
"blue"
]
)
@pytest.mark.parametrize(
"domain",
[
None,
"",
"x",
"abc",
"abc def"
]
)
def test_annotate_decorator(message, color, domain):
@nvtx.annotate(message=message, color=color, domain=domain)
def foo():
pass
foo()
def test_pickle_annotate():
orig = nvtx.annotate(message="foo", color="blue", domain="test")
pickled = pickle.dumps(orig)
unpickled = pickle.loads(pickled)
assert orig.attributes.message == unpickled.attributes.message
assert orig.attributes.color == unpickled.attributes.color
assert orig.domain == unpickled.domain
def test_domain_reuse():
a = nvtx._lib.Domain("x")
b = nvtx._lib.Domain("x")
assert a is b
c = nvtx._lib.Domain("y")
assert a is not c
@pytest.mark.parametrize(
"message",
[
None,
"",
"x",
"abc",
"abc def"
]
)
@pytest.mark.parametrize(
"color",
[
None,
"red",
"green",
"blue"
]
)
@pytest.mark.parametrize(
"domain",
[
None,
"",
"x",
"abc",
"abc def"
]
)
@pytest.mark.parametrize(
"category",
[
None,
"",
"y"
"x",
"abc",
"abc def",
0,
1,
]
)
def test_categories_basic(message, color, domain, category):
with nvtx.annotate(message=message, domain=domain, category=category):
pass
def test_get_category_id():
dom = nvtx._lib.Domain("foo")
id1 = dom.get_category_id("bar")
id2 = dom.get_category_id("bar")
assert id1 == id2
id3 = dom.get_category_id("baz")
assert id2 != id3
@pytest.mark.parametrize(
"message",
[
None,
"abc",
]
)
@pytest.mark.parametrize(
"color",
[
None,
"red",
]
)
@pytest.mark.parametrize(
"domain",
[
None,
"abc",
]
)
@pytest.mark.parametrize(
"category",
[
None,
"abc",
1,
]
)
def test_start_end(message, color, domain, category):
rng = nvtx.start_range(message, color, domain, category)
nvtx.end_range(rng)
@pytest.mark.parametrize(
"message",
[
None,
"abc",
]
)
@pytest.mark.parametrize(
"color",
[
None,
"red",
]
)
@pytest.mark.parametrize(
"domain",
[
None,
"abc",
]
)
@pytest.mark.parametrize(
"category",
[
None,
"abc",
1,
]
)
def test_push_pop(message, color, domain, category):
nvtx.push_range(message, color, domain, category)
nvtx.pop_range()
@pytest.mark.parametrize(
"message",
[
None,
"abc",
]
)
@pytest.mark.parametrize(
"color",
[
None,
"red",
]
)
@pytest.mark.parametrize(
"domain",
[
None,
"abc",
]
)
@pytest.mark.parametrize(
"category",
[
None,
"abc",
1,
]
)
def test_mark(message, color, domain, category):
nvtx.mark(message, color, domain, category)
| NVTX-release-v3 | python/nvtx/tests/test_basic.py |
NVTX-release-v3 | python/nvtx/tests/test_profiler.py |
|
# Copyright 2021-2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
import functools
import importlib
import sys
def py_func(func):
"""
Wraps func in a plain Python function.
"""
@functools.wraps(func)
def wrapped(*args, **kwargs):
return func(*args, **kwargs)
return wrapped
cython_test_modules = ["nvtx._lib.tests.test_profiler"]
for mod in cython_test_modules:
try:
# For each callable in `mod` with name `test_*`,
# wrap the callable in a plain Python function
# and set the result as an attribute of this module.
mod = importlib.import_module(mod)
for name in dir(mod):
item = getattr(mod, name)
if callable(item) and name.startswith("test_"):
item = py_func(item)
setattr(sys.modules[__name__], name, item)
except ImportError:
pass
| NVTX-release-v3 | python/nvtx/tests/test_cython.py |
# Copyright 2020-2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
from nvtx._lib.lib import (
Domain,
EventAttributes,
pop_range,
push_range,
start_range,
end_range,
mark
)
| NVTX-release-v3 | python/nvtx/_lib/__init__.py |
NVTX-release-v3 | python/nvtx/_lib/tests/__init__.py |
|
# Copyright 2020-2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
| NVTX-release-v3 | python/nvtx/utils/__init__.py |