###############################################################################
# Copyright (C) 2020-2021 Habana Labs, Ltd. an Intel Company
###############################################################################
# List of changes:
# - Added Conv2D and Dense kernel and bias regularization
# - Added general DenseNet configuration to support densenet121, densenet161, densenet169
import tensorflow as tf
from tensorflow.keras.layers import Input, Concatenate, ZeroPadding2D
from tensorflow.keras.layers import Dense, Dropout, Activation
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import AveragePooling2D, GlobalAveragePooling2D, MaxPooling2D
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.models import Model

# Channels-last (NHWC) data format: dense blocks concatenate feature maps
# along the channel axis. Shared by all block-builder functions below.
concat_axis = 3

def densenet_model(img_rows, img_cols, color_type=1, nb_dense_block=4, growth_rate=32,
                   nb_filter=64, nb_layers=[6, 12, 24, 16], reduction=0.5,
                   dropout_rate=0.0, weight_decay=1e-4, num_classes=None):
    '''
    Generic DenseNet model for Keras, covering DenseNet-121/161/169 via nb_layers.

    Model schema is based on https://github.com/flyyufelix/DenseNet-Keras

    ImageNet pretrained weights:
    Theano: https://drive.google.com/open?id=0Byy2AcGyEVxfMlRYb3YzV210VzQ
    TensorFlow: https://drive.google.com/open?id=0Byy2AcGyEVxfSTA4SHJVOHNuTXc

    # Arguments
        img_rows, img_cols: spatial dimensions of the input image
        color_type: number of input channels (1 for grayscale, 3 for RGB)
        nb_dense_block: number of dense blocks to add to the network
        growth_rate: number of filters added by each conv_block
        nb_filter: initial number of filters
        nb_layers: number of conv_block layers in each dense block
        reduction: reduction factor of transition blocks
        dropout_rate: dropout rate
        weight_decay: weight decay factor
        num_classes: number of classes to classify images into
    # Returns
        A Keras model instance.
    '''
    eps = 1.1e-5

    # Compute compression factor from the reduction rate.
    compression = 1.0 - reduction
    img_input = Input(shape=(img_rows, img_cols, color_type), name='data')

    # Initial convolution: 7x7/2 conv followed by 3x3/2 max pooling.
    # bias_regularizer is omitted here because use_bias=False means the layer
    # has no bias variable to regularize.
    x = ZeroPadding2D((3, 3), name='conv1_zeropadding')(img_input)
    x = Conv2D(nb_filter, 7, strides=(2, 2), name='conv1', use_bias=False,
               kernel_regularizer=tf.keras.regularizers.l2(weight_decay))(x)
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name='conv1_bn',
                           beta_regularizer=tf.keras.regularizers.l2(weight_decay),
                           gamma_regularizer=tf.keras.regularizers.l2(weight_decay))(x)
    # x = Scale(axis=concat_axis, name='conv1_scale')(x)
    x = Activation('relu', name='relu1')(x)
    x = ZeroPadding2D((1, 1), name='pool1_zeropadding')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), name='pool1')(x)
    # Stack dense blocks, each followed by a transition block (except the last).
    stage = 1  # the stem above is stage 1; dense blocks are numbered from stage 2
    for block_idx in range(nb_dense_block - 1):
        stage = block_idx + 2
        x, nb_filter = dense_block(x, stage, nb_layers[block_idx], nb_filter, growth_rate,
                                   dropout_rate=dropout_rate, weight_decay=weight_decay)
        x = transition_block(x, stage, nb_filter, compression=compression,
                             dropout_rate=dropout_rate, weight_decay=weight_decay)
        nb_filter = int(nb_filter * compression)

    # The final dense block has no transition block after it.
    final_stage = stage + 1
    x, nb_filter = dense_block(x, final_stage, nb_layers[-1], nb_filter, growth_rate,
                               dropout_rate=dropout_rate, weight_decay=weight_decay)
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name='conv' + str(final_stage) + '_blk_bn',
                           beta_regularizer=tf.keras.regularizers.l2(weight_decay),
                           gamma_regularizer=tf.keras.regularizers.l2(weight_decay))(x)
    # x = Scale(axis=concat_axis, name='conv'+str(final_stage)+'_blk_scale')(x)
    x = Activation('relu', name='relu' + str(final_stage) + '_blk')(x)

    # Classification head: global average pooling followed by a softmax classifier.
    x_fc = GlobalAveragePooling2D(name='pool' + str(final_stage))(x)
    x_fc = Dense(num_classes, kernel_regularizer=tf.keras.regularizers.l2(weight_decay),
                 bias_regularizer=tf.keras.regularizers.l2(weight_decay), name='fc6')(x_fc)
    x_fc = Activation('softmax', name='prob')(x_fc)

    model = Model(img_input, x_fc, name='densenet')
    return model
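
# Note: the standard DenseNet configurations from Huang et al. (2017) map to
# this function's arguments roughly as follows (DenseNet-161 also uses a wider
# stem, nb_filter=96):
#   DenseNet-121: nb_layers=[6, 12, 24, 16], growth_rate=32 (the defaults above)
#   DenseNet-169: nb_layers=[6, 12, 32, 32], growth_rate=32
#   DenseNet-161: nb_layers=[6, 12, 36, 24], growth_rate=48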

def conv_block(x, stage, branch, nb_filter, dropout_rate=None, weight_decay=1e-4):
    '''Apply BatchNorm, ReLU, 1x1 bottleneck Conv2D, 3x3 Conv2D, and optional dropout
    # Arguments
        x: input tensor
        stage: index for dense block
        branch: layer index within each dense block
        nb_filter: number of filters
        dropout_rate: dropout rate
        weight_decay: weight decay factor
    '''
    eps = 1.1e-5
    conv_name_base = 'conv' + str(stage) + '_' + str(branch)
    relu_name_base = 'relu' + str(stage) + '_' + str(branch)

    # 1x1 convolution (bottleneck layer): expand to 4x nb_filter channels.
    inter_channel = nb_filter * 4
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base + '_x1_bn',
                           beta_regularizer=tf.keras.regularizers.l2(weight_decay),
                           gamma_regularizer=tf.keras.regularizers.l2(weight_decay))(x)
    # x = Scale(axis=concat_axis, name=conv_name_base+'_x1_scale')(x)
    x = Activation('relu', name=relu_name_base + '_x1')(x)
    x = Conv2D(inter_channel, 1, name=conv_name_base + '_x1', use_bias=False,
               kernel_regularizer=tf.keras.regularizers.l2(weight_decay))(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    # 3x3 convolution producing nb_filter feature maps (growth_rate in a dense block).
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base + '_x2_bn',
                           beta_regularizer=tf.keras.regularizers.l2(weight_decay),
                           gamma_regularizer=tf.keras.regularizers.l2(weight_decay))(x)
    # x = Scale(axis=concat_axis, name=conv_name_base+'_x2_scale')(x)
    x = Activation('relu', name=relu_name_base + '_x2')(x)
    x = ZeroPadding2D((1, 1), name=conv_name_base + '_x2_zeropadding')(x)
    x = Conv2D(nb_filter, 3, name=conv_name_base + '_x2', use_bias=False,
               kernel_regularizer=tf.keras.regularizers.l2(weight_decay))(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    return x

def transition_block(x, stage, nb_filter, compression=1.0, dropout_rate=None, weight_decay=1e-4):
    '''Apply BatchNorm, ReLU, 1x1 Conv2D (with optional compression), optional dropout, and 2x2 average pooling
    # Arguments
        x: input tensor
        stage: index for dense block
        nb_filter: number of filters
        compression: calculated as 1 - reduction. Reduces the number of feature maps in the transition block.
        dropout_rate: dropout rate
        weight_decay: weight decay factor
    '''
    eps = 1.1e-5
    conv_name_base = 'conv' + str(stage) + '_blk'
    relu_name_base = 'relu' + str(stage) + '_blk'
    pool_name_base = 'pool' + str(stage)
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base + '_bn',
                           beta_regularizer=tf.keras.regularizers.l2(weight_decay),
                           gamma_regularizer=tf.keras.regularizers.l2(weight_decay))(x)
    # x = Scale(axis=concat_axis, name=conv_name_base+'_scale')(x)
    x = Activation('relu', name=relu_name_base)(x)
    x = Conv2D(int(nb_filter * compression), 1, name=conv_name_base, use_bias=False,
               kernel_regularizer=tf.keras.regularizers.l2(weight_decay))(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    x = AveragePooling2D((2, 2), strides=(2, 2), name=pool_name_base)(x)
    return x

def dense_block(x, stage, nb_layers, nb_filter, growth_rate, dropout_rate=None, weight_decay=1e-4,
                grow_nb_filters=True):
    '''Build a dense block where the output of each conv_block is fed to all subsequent ones
    # Arguments
        x: input tensor
        stage: index for dense block
        nb_layers: number of conv_block layers to append to the model
        nb_filter: number of filters
        growth_rate: growth rate
        dropout_rate: dropout rate
        weight_decay: weight decay factor
        grow_nb_filters: whether to grow nb_filter by growth_rate after each layer
    '''
    concat_feat = x
    for i in range(nb_layers):
        branch = i + 1
        x = conv_block(concat_feat, stage, branch, growth_rate, dropout_rate, weight_decay)
        # Concatenate the new feature maps with everything produced so far.
        concat_feat = Concatenate(axis=concat_axis)([concat_feat, x])
        if grow_nb_filters:
            nb_filter += growth_rate
    return concat_feat, nb_filter
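
if __name__ == "__main__":
    # Minimal smoke-test sketch, not part of the training flow: build the
    # default DenseNet-121 configuration for 224x224 RGB inputs and print a
    # summary. The input size and the 1000-way classifier are illustrative
    # placeholders, not values mandated by this repository.
    model = densenet_model(img_rows=224, img_cols=224, color_type=3, num_classes=1000)
    model.summary()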