Update app.py
app.py CHANGED
@@ -2,7 +2,88 @@ import os
 import cv2
 import gradio as gr
 from huggingface_hub import hf_hub_download
+import onnxruntime as ort
+import cv2
+import numpy as np
+from facenet_pytorch import MTCNN
+from torchvision import transforms
+import torch
+
+device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
+device_name = ort.get_device()
+
+if device_name == 'CPU':
+    providers = ['CPUExecutionProvider']
+elif device_name == 'GPU':
+    providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
+
+# load the MTCNN face detector
+mtcnn = MTCNN(image_size=256, margin=0, min_face_size=128, thresholds=[0.7, 0.8, 0.9], device=device)
+
+# MTCNN face detection with landmarks
+def detect(img):
+    # Detect faces
+    batch_boxes, batch_probs, batch_points = mtcnn.detect(img, landmarks=True)
+    return batch_boxes, batch_points
+
+
+# Expand the area around the detected face by a margin proportional to its size
+def margin_face(box, img_HW, margin=0.5):
+    x1, y1, x2, y2 = [c for c in box]
+    w, h = x2 - x1, y2 - y1
+    new_x1 = max(0, x1 - margin * w)
+    new_x2 = min(img_HW[1], x2 + margin * w)
+    x_d = min(x1 - new_x1, new_x2 - x2)
+    new_w = x2 - x1 + 2 * x_d
+    new_x1 = x1 - x_d
+    new_x2 = x2 + x_d
+
+    # new_h = 1.25 * new_w
+    new_h = 1.0 * new_w
+
+    if new_h >= h:
+        y_d = new_h - h
+        new_y1 = max(0, y1 - y_d // 2)
+        new_y2 = min(img_HW[0], y2 + y_d // 2)
+    else:
+        y_d = abs(new_h - h)
+        new_y1 = max(0, y1 + y_d // 2)
+        new_y2 = min(img_HW[0], y2 - y_d // 2)
+    return list(map(int, [new_x1, new_y1, new_x2, new_y2]))
+
+def process_image(img, x32=True):
+    h, w = img.shape[:2]
+    if x32:  # resize image to a multiple of 32
+        def to_32s(x):
+            return 256 if x < 256 else x - x % 32
+        img = cv2.resize(img, (to_32s(w), to_32s(h)))
+    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB).astype(np.float32) / 127.5 - 1.0
+    return img
+
+def load_image(image_path, focus_face):
+    img0 = cv2.imread(image_path).astype(np.float32)
+    if focus_face == "Yes":
+        batch_boxes, batch_points = detect(img0)
+        if batch_boxes is None:
+            print("No face detected!")
+            return
+        [x1, y1, x2, y2] = margin_face(batch_boxes[0], img0.shape[:2])
+        img0 = img0[y1:y2, x1:x2]
+    img = process_image(img0)
+    img = np.expand_dims(img, axis=0)
+    return img, img0.shape[:2]
+
+def convert(img, model, scale):
+    session = ort.InferenceSession(MODEL_PATH[model], providers=providers)
+    x = session.get_inputs()[0].name
+    y = session.get_outputs()[0].name
+    fake_img = session.run(None, {x: img})[0]
+    images = (np.squeeze(fake_img) + 1.) / 2 * 255
+    images = np.clip(images, 0, 255).astype(np.uint8)
+    output_image = cv2.resize(images, (scale[1], scale[0]))
+    return cv2.cvtColor(output_image, cv2.COLOR_RGB2BGR)
 
+
 os.makedirs('output', exist_ok=True)
 
 MODEL_PATH = {
@@ -22,6 +103,7 @@ def inference(img_path, model, focus_face=None):
     cv2.imwrite(save_path, output)
     return output, save_path
 
+### Layout ###
 
 title = "AnimeGANv2: To produce your own animation 😶🌫️"
 description = r"""### 🔥Demo AnimeGANv2: To produce your own animation. To use it, simply upload your image.<br>