# -*- coding: utf-8 -*-
import os
import time

import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import models
from torchvision.models.vgg import VGG

# from BagData import dataloader
from data_train import dataloader
from data_val import dataloader_val
# import visdom


class FCN32s(nn.Module):
    def __init__(self, pretrained_net, n_class):
        super().__init__()
        self.n_class = n_class
        self.pretrained_net = pretrained_net
        self.relu = nn.ReLU(inplace=True)
        self.deconv1 = nn.ConvTranspose2d(512, 512, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
        self.bn1 = nn.BatchNorm2d(512)
        self.deconv2 = nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
        self.bn2 = nn.BatchNorm2d(256)
        self.deconv3 = nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
        self.bn3 = nn.BatchNorm2d(128)
        self.deconv4 = nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
        self.bn4 = nn.BatchNorm2d(64)
        self.deconv5 = nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
        self.bn5 = nn.BatchNorm2d(32)
        self.classifier = nn.Conv2d(32, n_class, kernel_size=1)

    def forward(self, x):
        output = self.pretrained_net(x)
        x5 = output['x5']                                 # size=(N, 512, x.H/32, x.W/32)
        score = self.bn1(self.relu(self.deconv1(x5)))     # size=(N, 512, x.H/16, x.W/16)
        score = self.bn2(self.relu(self.deconv2(score)))  # size=(N, 256, x.H/8, x.W/8)
        score = self.bn3(self.relu(self.deconv3(score)))  # size=(N, 128, x.H/4, x.W/4)
        score = self.bn4(self.relu(self.deconv4(score)))  # size=(N, 64, x.H/2, x.W/2)
        score = self.bn5(self.relu(self.deconv5(score)))  # size=(N, 32, x.H, x.W)
        score = self.classifier(score)                    # size=(N, n_class, x.H, x.W)
        return score                                      # size=(N, n_class, x.H, x.W)


class FCN16s(nn.Module):
    def __init__(self, pretrained_net, n_class):
        super().__init__()
        self.n_class = n_class
        self.pretrained_net = pretrained_net
        self.relu = nn.ReLU(inplace=True)
        self.deconv1 = nn.ConvTranspose2d(512, 512, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
        self.bn1 = nn.BatchNorm2d(512)
        self.deconv2 = nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
        self.bn2 = nn.BatchNorm2d(256)
        self.deconv3 = nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
        self.bn3 = nn.BatchNorm2d(128)
        self.deconv4 = nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
        self.bn4 = nn.BatchNorm2d(64)
        self.deconv5 = nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
        self.bn5 = nn.BatchNorm2d(32)
        self.classifier = nn.Conv2d(32, n_class, kernel_size=1)

    def forward(self, x):
        output = self.pretrained_net(x)
        x5 = output['x5']                                 # size=(N, 512, x.H/32, x.W/32)
        x4 = output['x4']                                 # size=(N, 512, x.H/16, x.W/16)
        score = self.relu(self.deconv1(x5))               # size=(N, 512, x.H/16, x.W/16)
        score = self.bn1(score + x4)                      # element-wise add, size=(N, 512, x.H/16, x.W/16)
        score = self.bn2(self.relu(self.deconv2(score)))  # size=(N, 256, x.H/8, x.W/8)
        score = self.bn3(self.relu(self.deconv3(score)))  # size=(N, 128, x.H/4, x.W/4)
        score = self.bn4(self.relu(self.deconv4(score)))  # size=(N, 64, x.H/2, x.W/2)
        score = self.bn5(self.relu(self.deconv5(score)))  # size=(N, 32, x.H, x.W)
        score = self.classifier(score)                    # size=(N, n_class, x.H, x.W)
        return score                                      # size=(N, n_class, x.H, x.W)
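
# Note on the upsampling arithmetic (added comment, not part of the original code):
# for nn.ConvTranspose2d, H_out = (H_in - 1)*stride - 2*padding + dilation*(kernel_size - 1) + output_padding + 1.
# With kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1 this gives
# H_out = 2*H_in - 2 - 2 + 2 + 1 + 1 = 2*H_in, i.e. every deconv stage exactly doubles H and W,
# so five stages undo the five 2x max-pools of the VGG backbone.
def _deconv_out_size(h_in, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1):
    """Illustrative helper mirroring the ConvTranspose2d output-size formula; not used in training."""
    return (h_in - 1) * stride - 2 * padding + dilation * (kernel_size - 1) + output_padding + 1
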
class FCN8s(nn.Module):
    def __init__(self, pretrained_net, n_class):
        super().__init__()
        self.n_class = n_class
        self.pretrained_net = pretrained_net
        self.relu = nn.ReLU(inplace=True)
        self.deconv1 = nn.ConvTranspose2d(512, 512, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
        self.bn1 = nn.BatchNorm2d(512)
        self.deconv2 = nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
        self.bn2 = nn.BatchNorm2d(256)
        self.deconv3 = nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
        self.bn3 = nn.BatchNorm2d(128)
        self.deconv4 = nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
        self.bn4 = nn.BatchNorm2d(64)
        self.deconv5 = nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
        self.bn5 = nn.BatchNorm2d(32)
        self.classifier = nn.Conv2d(32, n_class, kernel_size=1)

    def forward(self, x):
        output = self.pretrained_net(x)
        x5 = output['x5']                                 # size=(N, 512, x.H/32, x.W/32)
        x4 = output['x4']                                 # size=(N, 512, x.H/16, x.W/16)
        x3 = output['x3']                                 # size=(N, 256, x.H/8, x.W/8)
        score = self.relu(self.deconv1(x5))               # size=(N, 512, x.H/16, x.W/16)
        score = self.bn1(score + x4)                      # element-wise add, size=(N, 512, x.H/16, x.W/16)
        score = self.relu(self.deconv2(score))            # size=(N, 256, x.H/8, x.W/8)
        score = self.bn2(score + x3)                      # element-wise add, size=(N, 256, x.H/8, x.W/8)
        score = self.bn3(self.relu(self.deconv3(score)))  # size=(N, 128, x.H/4, x.W/4)
        score = self.bn4(self.relu(self.deconv4(score)))  # size=(N, 64, x.H/2, x.W/2)
        score = self.bn5(self.relu(self.deconv5(score)))  # size=(N, 32, x.H, x.W)
        score = self.classifier(score)                    # size=(N, n_class, x.H, x.W)
        return score                                      # size=(N, n_class, x.H, x.W)


class FCNs(nn.Module):
    def __init__(self, pretrained_net, n_class):
        super().__init__()
        self.n_class = n_class
        self.pretrained_net = pretrained_net
        self.relu = nn.ReLU(inplace=True)
        self.deconv1 = nn.ConvTranspose2d(512, 512, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
        self.bn1 = nn.BatchNorm2d(512)
        self.deconv2 = nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
        self.bn2 = nn.BatchNorm2d(256)
        self.deconv3 = nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
        self.bn3 = nn.BatchNorm2d(128)
        self.deconv4 = nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
        self.bn4 = nn.BatchNorm2d(64)
        self.deconv5 = nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
        self.bn5 = nn.BatchNorm2d(32)
        self.classifier = nn.Conv2d(32, n_class, kernel_size=1)

    def forward(self, x):
        output = self.pretrained_net(x)
        x5 = output['x5']  # size=(N, 512, x.H/32, x.W/32)
        x4 = output['x4']  # size=(N, 512, x.H/16, x.W/16)
        x3 = output['x3']  # size=(N, 256, x.H/8, x.W/8)
        x2 = output['x2']  # size=(N, 128, x.H/4, x.W/4)
        x1 = output['x1']  # size=(N, 64, x.H/2, x.W/2)
        score = self.bn1(self.relu(self.deconv1(x5)))     # size=(N, 512, x.H/16, x.W/16)
        score = score + x4                                # element-wise add, size=(N, 512, x.H/16, x.W/16)
        score = self.bn2(self.relu(self.deconv2(score)))  # size=(N, 256, x.H/8, x.W/8)
        score = score + x3                                # element-wise add, size=(N, 256, x.H/8, x.W/8)
        score = self.bn3(self.relu(self.deconv3(score)))  # size=(N, 128, x.H/4, x.W/4)
        score = score + x2                                # element-wise add, size=(N, 128, x.H/4, x.W/4)
        score = self.bn4(self.relu(self.deconv4(score)))  # size=(N, 64, x.H/2, x.W/2)
        score = score + x1                                # element-wise add, size=(N, 64, x.H/2, x.W/2)
        score = self.bn5(self.relu(self.deconv5(score)))  # size=(N, 32, x.H, x.W)
        score = self.classifier(score)                    # size=(N, n_class, x.H, x.W)
        return score                                      # size=(N, n_class, x.H, x.W)


class VGGNet(VGG):
    def __init__(self, pretrained=True, model='vgg16', requires_grad=True, remove_fc=True, show_params=False):
        super().__init__(make_layers(cfg[model]))
        self.ranges = ranges[model]
        if pretrained:
            # copy ImageNet-pretrained weights from the matching torchvision model
            self.load_state_dict(getattr(models, model)(pretrained=True).state_dict())
        if not requires_grad:
            for param in super().parameters():
                param.requires_grad = False
        if remove_fc:  # delete redundant fully-connected layer params, can save memory
            del self.classifier
        if show_params:
            for name, param in self.named_parameters():
                print(name, param.size())

    def forward(self, x):
        output = {}
        # get the output of each maxpooling layer (5 maxpool in VGG net)
        for idx in range(len(self.ranges)):
            for layer in range(self.ranges[idx][0], self.ranges[idx][1]):
                x = self.features[layer](x)
            output["x%d" % (idx + 1)] = x
        return output


ranges = {
    'vgg11': ((0, 3), (3, 6), (6, 11), (11, 16), (16, 21)),
    'vgg13': ((0, 5), (5, 10), (10, 15), (15, 20), (20, 25)),
    'vgg16': ((0, 5), (5, 10), (10, 17), (17, 24), (24, 31)),
    'vgg19': ((0, 5), (5, 10), (10, 19), (19, 28), (28, 37)),
}

# cropped version from https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py
cfg = {
    'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}


def make_layers(cfg, batch_norm=False):
    layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
            else:
                layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = v
    return nn.Sequential(*layers)


if __name__ == "__main__":
    # vis = visdom.Visdom()
    vgg_model = VGGNet(requires_grad=True)
    fcn_model = FCNs(pretrained_net=vgg_model, n_class=2)
    fcn_model = fcn_model.cuda()
    criterion = nn.BCELoss().cuda()
    optimizer = optim.SGD(fcn_model.parameters(), lr=1e-2, momentum=0.7)

    # the *4 assumes a batch size of 4 in both dataloaders
    print('Train {}, Validation {}'.format(len(dataloader) * 4, len(dataloader_val) * 4))

    epochs = 10
    train_loss = []
    valid_loss = []
    for epo in range(epochs):
        start = time.time()
        epo_loss = 0
        fcn_model.train()
        for item in dataloader:
            input = item['A'].cuda()
            y = item['B'].cuda()
            optimizer.zero_grad()
            output = fcn_model(input)
            output = torch.sigmoid(output)  # BCELoss expects probabilities in [0, 1]
            loss = criterion(output, y)
            loss.backward()
            epo_loss += loss.item()
            optimizer.step()

        val_loss = 0
        fcn_model.eval()
        with torch.no_grad():
            for item in dataloader_val:
                input = item['A'].cuda()
                y = item['B'].cuda()
                output = fcn_model(input)
                output = torch.sigmoid(output)
                loss = criterion(output, y)
                val_loss += loss.item()

        end = time.time()
        trainingLoss = epo_loss / len(dataloader)
        validationLoss = val_loss / len(dataloader_val)
        train_loss.append(trainingLoss)
        valid_loss.append(validationLoss)
        print('epoch loss = %f' % trainingLoss, 'validation loss = %f' % validationLoss, 'time cost', end - start, 's')

        if not os.path.exists('checkpoints'):
            os.makedirs('checkpoints')
        torch.save(fcn_model, 'checkpoints/fcn_model_{}.pt'.format(epo))
        print('saving checkpoints/fcn_model_{}.pt \n'.format(epo))

    np.save('training_loss', train_loss)
    np.save('validation_loss', valid_loss)

    y1 = train_loss
    y2 = valid_loss
    x = np.array(range(epochs))
    plt.plot(x, y1, label='training loss')
    plt.plot(x, y2, label='validation loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    plt.savefig('Loss.png')
    plt.show()
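
    # --- Optional post-training sanity check (added sketch; keys and paths mirror the loops above) ---
    # Reload the last saved checkpoint and push one validation batch through it to confirm the
    # output shape is (N, n_class, H, W). On newer PyTorch versions, torch.load may need
    # weights_only=False to restore a whole pickled model like the one saved above.
    reloaded = torch.load('checkpoints/fcn_model_{}.pt'.format(epochs - 1))
    reloaded.eval()
    with torch.no_grad():
        sample = next(iter(dataloader_val))
        probs = torch.sigmoid(reloaded(sample['A'].cuda()))  # per-pixel class probabilities
        pred_mask = probs.argmax(dim=1)                      # (N, H, W) hard label map
        print('prediction:', tuple(probs.shape), 'mask:', tuple(pred_mask.shape))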