HemaAM committed on
Commit 96d6bb6 · 1 Parent(s): 6c4a6d3

Upload custom resnet model


This is the initial upload and commit of the custom ResNet model for which a Gradio-based inference app is developed for the CIFAR-10 dataset.
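
As a companion to the commit message above, here is a minimal sketch of how a Gradio app might wrap this model for CIFAR-10 inference. The checkpoint filename, preprocessing, and class-name ordering are illustrative assumptions, not part of this commit:

# Hypothetical usage sketch (not part of this commit): wiring custResNet
# into a Gradio image-classification demo for CIFAR-10.
import torch
import gradio as gr
from torchvision import transforms
from model import custResNet

model = custResNet()
# "model_weights.pth" is an assumed checkpoint name for illustration.
model.load_state_dict(torch.load("model_weights.pth", map_location="cpu"))
model.eval()

classes = ["airplane", "automobile", "bird", "cat", "deer",
           "dog", "frog", "horse", "ship", "truck"]

preprocess = transforms.Compose([
    transforms.Resize((32, 32)),
    transforms.ToTensor(),  # normalization stats omitted; match training
])

def predict(image):
    x = preprocess(image).unsqueeze(0)  # shape (1, 3, 32, 32)
    with torch.no_grad():
        probs = torch.softmax(model(x), dim=1)[0]
    return {classes[i]: float(probs[i]) for i in range(10)}

gr.Interface(fn=predict,
             inputs=gr.Image(type="pil"),
             outputs=gr.Label(num_top_classes=3)).launch()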

Files changed (1)
  1. model.py +178 -0
model.py ADDED
@@ -0,0 +1,178 @@
import torch.nn as nn
import torchinfo


class convLayer(nn.Module):
    def __init__(self, l_input_c, l_output_c, bias=False,
                 padding=1, stride=1, max_pooling=False,
                 dropout=0):
        super(convLayer, self).__init__()

        self.convLayer = nn.Conv2d(in_channels=l_input_c,
                                   out_channels=l_output_c,
                                   kernel_size=(3, 3),
                                   stride=stride,
                                   padding=padding,
                                   padding_mode='replicate',
                                   bias=bias)

        self.max_pooling = None
        if max_pooling:
            self.max_pooling = nn.MaxPool2d(2, 2)

        self.normLayer = nn.BatchNorm2d(l_output_c)

        self.activationLayer = nn.ReLU()

        self.dropout = None
        if dropout > 0:
            self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        x = self.convLayer(x)

        if self.max_pooling is not None:
            x = self.max_pooling(x)

        x = self.normLayer(x)
        x = self.activationLayer(x)

        if self.dropout is not None:
            x = self.dropout(x)

        return x


class custBlock(nn.Module):
    def __init__(self, l_input_c, l_output_c, bias=False,
                 padding=1, stride=1, max_pooling=True,
                 dropout=0, residual_links=2):
        super(custBlock, self).__init__()

        self.conv_pool_block = convLayer(l_input_c=l_input_c,
                                         l_output_c=l_output_c,
                                         bias=bias, padding=padding,
                                         stride=stride, max_pooling=max_pooling,
                                         dropout=dropout)

        self.residual_block = None
        if residual_links > 0:
            res_layer_seq = []
            for link in range(residual_links):
                res_layer_seq.append(
                    convLayer(l_input_c=l_output_c,
                              l_output_c=l_output_c,
                              bias=bias, padding=padding,
                              stride=stride, max_pooling=False,
                              dropout=dropout)
                )
            self.residual_block = nn.Sequential(*res_layer_seq)

    def forward(self, x):
        x = self.conv_pool_block(x)

        if self.residual_block is not None:
            tmp_x = x
            x = self.residual_block(x)
            x = x + tmp_x

        return x


class custResNet(nn.Module):
    def __init__(self, dropout=0):
        super(custResNet, self).__init__()

        ##### Prep Block #####
        # A 3x3 convolution with stride=1 and padding=1, followed by
        # batch normalization and ReLU. 64 kernels are used in this block.
        # By default, dropout is set to 0.
        self.prep_block = custBlock(l_input_c=3, l_output_c=64,
                                    max_pooling=False, dropout=dropout,
                                    residual_links=0
                                    )  # output_size = 32, rf_out = 3

        ##### Convolution Block - 1 #####
        # First, a 3x3 convolution with stride=1 and padding=1, followed
        # by max pooling, batch normalization, and ReLU. Second, a
        # residual branch of two 3x3 convolutions (stride=1, padding=1),
        # each followed by batch normalization and ReLU. Third, the
        # outputs of the first and second steps are added to form the
        # skip connection. 128 kernels are used in this block.
        # By default, dropout is set to 0.
        self.block1 = custBlock(l_input_c=64, l_output_c=128,
                                max_pooling=True, dropout=dropout,
                                residual_links=2
                                )  # output_size = 16, rf_out = 13

        ##### Convolution Block - 2 #####
        # A 3x3 convolution with stride=1 and padding=1, followed by
        # max pooling, batch normalization, and ReLU; no residual branch.
        # 256 kernels are used in this block.
        # By default, dropout is set to 0.
        self.block2 = custBlock(l_input_c=128, l_output_c=256,
                                max_pooling=True, dropout=dropout,
                                residual_links=0
                                )  # output_size = 8, rf_out = 17

        ##### Convolution Block - 3 #####
        # Same structure as Convolution Block - 1: a 3x3 convolution
        # (stride=1, padding=1) with max pooling, batch normalization,
        # and ReLU, followed by a two-convolution residual branch whose
        # output is added back as a skip connection.
        # 512 kernels are used in this block.
        # By default, dropout is set to 0.
        self.block3 = custBlock(l_input_c=256, l_output_c=512,
                                max_pooling=True, dropout=dropout,
                                residual_links=2
                                )  # output_size = 4, rf_out = 57

        self.max_pool_layer = nn.MaxPool2d(4, 4)
        # output_size = 1, rf_out = 81
        self.flatten_layer = nn.Flatten()
        self.fc = nn.Linear(512, 10)
        # self.softmax = nn.Softmax()

    def forward(self, x):
        x = self.prep_block(x)
        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        x = self.max_pool_layer(x)
        x = self.flatten_layer(x)
        x = self.fc(x)

        return x

    # Network summary
    def summary(self, input_size=None, depth=10):
        return torchinfo.summary(self, input_size=input_size,
                                 depth=depth,
                                 col_names=["input_size",
                                            "output_size",
                                            "num_params",
                                            "kernel_size",
                                            "params_percent"])
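
For reference, a quick smoke test of the uploaded module; a minimal sketch assuming model.py is importable from the working directory, with an arbitrary batch size and dropout value:

import torch
from model import custResNet

net = custResNet(dropout=0.05)   # dropout value chosen arbitrarily
x = torch.randn(2, 3, 32, 32)    # a batch of two CIFAR-10-sized images
logits = net(x)
print(logits.shape)              # torch.Size([2, 10])

# torchinfo summary of the architecture, via the module's summary helper
net.summary(input_size=(2, 3, 32, 32))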