# insightface
from __future__ import division

import cv2
import numpy as np
import onnxruntime
from skimage import transform as trans


def transform(data, center, output_size, scale, rotation):
    # Build a similarity transform that scales, re-centers, rotates and
    # translates the face region into an output_size x output_size crop.
    scale_ratio = scale
    rot = float(rotation) * np.pi / 180.0
    t1 = trans.SimilarityTransform(scale=scale_ratio)
    cx = center[0] * scale_ratio
    cy = center[1] * scale_ratio
    t2 = trans.SimilarityTransform(translation=(-1 * cx, -1 * cy))
    t3 = trans.SimilarityTransform(rotation=rot)
    t4 = trans.SimilarityTransform(translation=(output_size / 2, output_size / 2))
    t = t1 + t2 + t3 + t4
    M = t.params[0:2]
    cropped = cv2.warpAffine(data, M, (output_size, output_size), borderValue=0.0)
    return cropped, M


def trans_points2d(pts, M):
    # Apply the 2x3 affine matrix M to each 2D point.
    new_pts = np.zeros(shape=pts.shape, dtype=np.float32)
    for i in range(pts.shape[0]):
        pt = pts[i]
        new_pt = np.array([pt[0], pt[1], 1.0], dtype=np.float32)
        new_pt = np.dot(M, new_pt)
        new_pts[i] = new_pt[0:2]
    return new_pts


class Landmark106:
    """ONNX Runtime wrapper for a 106-point facial landmark model."""

    def __init__(self, model_file, device="cuda"):
        if device == "cuda":
            providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
        else:
            providers = ["CPUExecutionProvider"]
        self.session = onnxruntime.InferenceSession(model_file, providers=providers)
        self.input_mean = 0.0
        self.input_std = 1.0
        self.input_size = (192, 192)
        input_cfg = self.session.get_inputs()[0]
        self.input_name = input_cfg.name
        self.output_names = [out.name for out in self.session.get_outputs()]
        self.lmk_num = 106

    def get(self, img, bbox):
        # Crop the face given by bbox (x1, y1, x2, y2) to the model input size,
        # run inference, then map the predicted landmarks back to the
        # coordinate system of the original image.
        w, h = (bbox[2] - bbox[0]), (bbox[3] - bbox[1])
        center = (bbox[2] + bbox[0]) / 2, (bbox[3] + bbox[1]) / 2
        rotate = 0
        _scale = self.input_size[0] / (max(w, h) * 1.5)
        aimg, M = transform(img, center, self.input_size[0], _scale, rotate)
        input_size = tuple(aimg.shape[0:2][::-1])
        blob = cv2.dnn.blobFromImage(
            aimg,
            1.0 / self.input_std,
            input_size,
            (self.input_mean, self.input_mean, self.input_mean),
            swapRB=True,
        )
        pred = self.session.run(self.output_names, {self.input_name: blob})[0][0]
        pred = pred.reshape((-1, 2))
        if self.lmk_num < pred.shape[0]:
            pred = pred[self.lmk_num * -1:, :]
        # Model outputs lie in [-1, 1]; map them to crop pixel coordinates.
        pred[:, 0:2] += 1
        pred[:, 0:2] *= (self.input_size[0] // 2)
        # Invert the crop transform to get landmarks in original-image pixels.
        IM = cv2.invertAffineTransform(M)
        pred = trans_points2d(pred, IM)
        return pred
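

# Minimal usage sketch. Assumptions: the model path "2d106det.onnx", the image
# "face.jpg", and the hard-coded bounding box below are placeholders; in
# practice the bbox would come from a separate face detector.
if __name__ == "__main__":
    import sys

    # Load the 106-point landmark model on CPU (use device="cuda" if available).
    landmark = Landmark106("2d106det.onnx", device="cpu")

    # Read a test image with OpenCV (BGR, HxWx3).
    img = cv2.imread("face.jpg")
    if img is None:
        sys.exit("face.jpg not found")

    # Placeholder face box in (x1, y1, x2, y2) pixel coordinates.
    bbox = np.array([100, 100, 300, 300], dtype=np.float32)

    # pts has shape (106, 2): landmark (x, y) positions in the original image.
    pts = landmark.get(img, bbox)
    for (x, y) in pts:
        cv2.circle(img, (int(x), int(y)), 1, (0, 255, 0), -1)
    cv2.imwrite("face_landmarks.jpg", img)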