Update roop/ProcessMgr.py
roop/ProcessMgr.py  CHANGED  +17 -4
@@ -6,7 +6,7 @@ import psutil
 from roop.ProcessOptions import ProcessOptions
 
 from roop.face_util import get_first_face, get_all_faces, rotate_anticlockwise, rotate_clockwise, clamp_cut_values
-from roop.utilities import compute_cosine_distance, get_device, str_to_class
+from roop.utilities import compute_cosine_distance, get_device, str_to_class, shuffle_array
 import roop.vr_util as vr
 
 from typing import Any, List, Callable
@@ -117,6 +117,11 @@ class ProcessMgr():
         roop.globals.g_desired_face_analysis=["landmark_3d_68", "landmark_2d_106","detection","recognition"]
         if options.swap_mode == "all_female" or options.swap_mode == "all_male":
             roop.globals.g_desired_face_analysis.append("genderage")
+        elif options.swap_mode == "all_random":
+            # don't modify original list
+            self.input_face_datas = input_faces.copy()
+            shuffle_array(self.input_face_datas)
+
 
         for p in self.processors:
             newp = next((x for x in options.processors.keys() if x == p.processorname), None)
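The "all_random" branch relies on shuffle_array, newly imported from roop.utilities in the first hunk. Its implementation is not part of this diff; presumably it is a thin in-place shuffle along the lines of the sketch below (only the name comes from the commit, the body is an assumption):

    import random

    def shuffle_array(arr: list) -> list:
        # assumed behavior: shuffle the list in place and return it,
        # giving the copied input_face_datas a random order once per run
        random.shuffle(arr)
        return arr

Because the shuffled copy is built once in initialize(), the random source-to-target assignment stays fixed for the whole processing run rather than changing from frame to frame.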
@@ -133,6 +138,14 @@ class ProcessMgr():
             p = str_to_class(module, classname)
             if p is not None:
                 extoption.update({"devicename": devicename})
+                if p.type == "swap":
+                    if self.options.swap_modelname == "InSwapper 128":
+                        extoption.update({"modelname": "inswapper_128.onnx"})
+                    elif self.options.swap_modelname == "ReSwapper 128":
+                        extoption.update({"modelname": "reswapper_128.onnx"})
+                    elif self.options.swap_modelname == "ReSwapper 256":
+                        extoption.update({"modelname": "reswapper_256.onnx"})
+
                 p.Initialize(extoption)
                 newprocessors.append(p)
             else:
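The new branch maps the UI model label to the onnx filename passed to the swap processor through extoption; a label that matches none of the three entries leaves "modelname" unset, presumably falling back to the processor's own default. The same mapping could be kept table-driven; the following is only an illustrative, self-contained alternative, not part of the commit:

    # illustrative alternative to the if/elif chain above
    SWAP_MODEL_FILES = {
        "InSwapper 128": "inswapper_128.onnx",
        "ReSwapper 128": "reswapper_128.onnx",
        "ReSwapper 256": "reswapper_256.onnx",
    }

    def resolve_swap_model(swap_modelname: str) -> str | None:
        # returns the onnx filename for a known label, else None
        return SWAP_MODEL_FILES.get(swap_modelname)

    print(resolve_swap_model("ReSwapper 256"))  # reswapper_256.onnx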
@@ -417,7 +430,7 @@ class ProcessMgr():
                 num_faces_found += 1
                 temp_frame = self.process_face(self.options.selected_index, face, temp_frame)
 
-        elif self.options.swap_mode == "all_input":
+        elif self.options.swap_mode == "all_input" or self.options.swap_mode == "all_random":
             for i,face in enumerate(faces):
                 num_faces_found += 1
                 if i < len(self.input_face_datas):
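"all_random" reuses the existing index-wise pairing of the "all_input" branch: detected face i is swapped with input_face_datas[i], and any detected faces beyond the length of the input list are left untouched. A toy illustration of that pairing (identifiers here are made up for the example):

    # toy illustration of the index-wise pairing; with swap_mode "all_random"
    # the inputs list is the copy shuffled during initialize()
    detected = ["face_A", "face_B", "face_C"]
    inputs = ["src_2", "src_0", "src_1"]
    pairs = [(face, inputs[i]) for i, face in enumerate(detected) if i < len(inputs)]
    print(pairs)  # [('face_A', 'src_2'), ('face_B', 'src_0'), ('face_C', 'src_1')]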
@@ -591,8 +604,8 @@ class ProcessMgr():
         Kind of subsampling the cutout and aligned face image and faceswapping slices of it up to
         the desired output resolution. This works around the current resolution limitations without using enhancers.
         """
-        model_output_size = 128
-        subsample_size = self.options.subsample_size
+        model_output_size = self.options.swap_output_size
+        subsample_size = max(self.options.subsample_size, model_output_size)
         subsample_total = subsample_size // model_output_size
         aligned_img, M = align_crop(frame, target_face.kps, subsample_size)
 
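With the hardcoded model output size replaced by self.options.swap_output_size, the subsampling math follows whichever swapper is selected. A quick worked example of the two changed lines (values picked for illustration):

    # e.g. ReSwapper 256 selected, user subsample setting of 128
    swap_output_size = 256
    requested_subsample = 128
    subsample_size = max(requested_subsample, swap_output_size)   # -> 256, never below the model output
    subsample_total = subsample_size // swap_output_size          # -> 1
    # a 512 subsample setting with the same model gives subsample_total = 512 // 256 = 2

The max() guard keeps a subsample setting smaller than the model's native output (possible now that 256-pixel swappers exist) from integer-dividing subsample_total down to 0.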