import torch
from torch import nn
from model_irse import Backbone

# Use GPU if available
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")

class IDLoss(nn.Module):
    def __init__(self, opts):
        super(IDLoss, self).__init__()
        print('Loading ResNet ArcFace')
        self.facenet = Backbone(input_size=112, num_layers=50, drop_ratio=0.6, mode='ir_se')
        self.facenet.load_state_dict(torch.load("./pretrained/model_ir_se50.pth", map_location=device))
        self.pool = torch.nn.AdaptiveAvgPool2d((256, 256))
        self.face_pool = torch.nn.AdaptiveAvgPool2d((112, 112))
        self.facenet.eval()
        self.opts = opts

    def extract_feats(self, x):
        if x.shape[2] != 256:
            x = self.pool(x)
        x = x[:, :, 35:223, 32:220]  # Crop the central 188x188 face region
        x = self.face_pool(x)  # Resize to the 112x112 ArcFace input size
        x_feats = self.facenet(x)
        return x_feats

    def forward(self, y_hat, y):
        n_samples = y.shape[0]
        y_feats = self.extract_feats(y)  # Features of the target (real) images
        y_hat_feats = self.extract_feats(y_hat)  # Features of the generated images
        y_feats = y_feats.detach()
        loss = 0
        sim_improvement = 0  # Never updated here, so the second return value is always 0
        count = 0
        for i in range(n_samples):
            # Identity loss: 1 - similarity between corresponding ArcFace embeddings
            diff_target = y_hat_feats[i].dot(y_feats[i])
            loss += 1 - diff_target
            count += 1
        return loss / count, sim_improvement / count
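
# --- Illustrative usage sketch (an assumption, not part of the original file) ---
# Assumes `model_irse.py` and the pretrained weights at
# ./pretrained/model_ir_se50.pth are available, and that images come as
# (N, 3, H, W) float tensors. `opts` is only stored by IDLoss, so an empty
# namespace suffices here; the random tensors merely stand in for real
# target and generated face batches.
if __name__ == "__main__":
    from argparse import Namespace

    id_loss = IDLoss(Namespace()).to(device)

    y = torch.randn(4, 3, 256, 256, device=device)      # target images
    y_hat = torch.randn(4, 3, 256, 256, device=device)  # generated images

    loss, sim_improvement = id_loss(y_hat, y)
    print(f"id loss: {loss.item():.4f}, sim improvement: {sim_improvement}")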