assile committed on
Commit
2aa3bf5
·
verified ·
1 Parent(s): c8448b1

Upload 7 files

Browse files
ui/globals.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Shared mutable UI state, referenced across the ui.tabs modules.

# Set True (e.g. by the settings tab) to make ui.main.run() relaunch the server.
ui_restart_server = False

# Faces extracted from the currently selected source media.
SELECTION_FACES_DATA = None
# Index of the input face currently selected in the gallery.
ui_SELECTED_INPUT_FACE_INDEX = 0

# Gradio components created by faceswap_tab() and shared with other tabs.
ui_selected_enhancer = None
ui_upscale = None
ui_blend_ratio = None
ui_input_thumbs = []
ui_target_thumbs = []
ui_camera_frame = None
ui_selected_swap_model = None
+
ui/main.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import time
3
+ import gradio as gr
4
+ import roop.globals
5
+ import roop.metadata
6
+ import roop.utilities as util
7
+ import ui.globals as uii
8
+
9
+ from ui.tabs.faceswap_tab import faceswap_tab
10
+ from ui.tabs.livecam_tab import livecam_tab
11
+ from ui.tabs.facemgr_tab import facemgr_tab
12
+ from ui.tabs.extras_tab import extras_tab
13
+ from ui.tabs.settings_tab import settings_tab
14
+
15
# Reset processing flags; the faceswap tab later rebinds some of these to
# gradio Checkbox components (see faceswap_tab).
roop.globals.keep_fps = None
roop.globals.keep_frames = None
roop.globals.skip_audio = None
roop.globals.use_batch = None
20
+
21
def prepare_environment():
    """Create the output folder and point TEMP/gradio temp dirs at the workdir.

    When CFG.use_os_temp_folder is False, TEMP/TMP are redirected to ./temp so
    gradio uploads land next to the application instead of the system temp dir.
    """
    roop.globals.output_path = os.path.abspath(os.path.join(os.getcwd(), "output"))
    os.makedirs(roop.globals.output_path, exist_ok=True)
    if not roop.globals.CFG.use_os_temp_folder:
        os.environ["TEMP"] = os.environ["TMP"] = os.path.abspath(os.path.join(os.getcwd(), "temp"))
    os.makedirs(os.environ["TEMP"], exist_ok=True)
    os.environ["GRADIO_TEMP_DIR"] = os.environ["TEMP"]
    # opt out of gradio telemetry
    os.environ['GRADIO_ANALYTICS_ENABLED'] = '0'
29
+
30
def run():
    """Build and launch the Gradio UI, relaunching until shutdown.

    The outer loop rebuilds the Blocks app whenever ``uii.ui_restart_server``
    is set to True by another tab; a KeyboardInterrupt/OSError in the idle
    wait loop closes the server and exits.
    """
    # imported lazily to avoid a circular import at module load time
    from roop.core import decode_execution_providers, set_display_ui

    prepare_environment()

    # route status messages from roop.core to a gradio popup
    set_display_ui(show_msg)
    # fall back to CPU when cuda is configured but no cuda device is present
    if roop.globals.CFG.provider == "cuda" and util.has_cuda_device() == False:
        roop.globals.CFG.provider = "cpu"

    roop.globals.execution_providers = decode_execution_providers([roop.globals.CFG.provider])
    gputype = util.get_device()
    if gputype == 'cuda':
        util.print_cuda_info()

    print(f'Using provider {roop.globals.execution_providers} - Device:{gputype}')

    run_server = True
    uii.ui_restart_server = False
    mycss = """
span {color: var(--block-info-text-color)}
#fixedheight {
    max-height: 238.4px;
    overflow-y: auto !important;
}
.image-container.svelte-1l6wqyv {height: 100%}

"""

    while run_server:
        # empty server name / non-positive port mean "let gradio choose"
        server_name = roop.globals.CFG.server_name
        if server_name is None or len(server_name) < 1:
            server_name = None
        server_port = roop.globals.CFG.server_port
        if server_port <= 0:
            server_port = None
        # binding all interfaces would make ssl verification fail
        ssl_verify = False if server_name == '0.0.0.0' else True
        with gr.Blocks(title=f'{roop.metadata.name} {roop.metadata.version}', theme=roop.globals.CFG.selected_theme, css=mycss, delete_cache=(60, 86400)) as ui:
            with gr.Row(variant='compact'):
                gr.Markdown(f"### [{roop.metadata.name} {roop.metadata.version}](https://github.com/C0untFloyd/roop-unleashed)")
                gr.HTML(util.create_version_html(), elem_id="versions")
            faceswap_tab()
            livecam_tab()
            facemgr_tab()
            extras_tab()
            settings_tab()
        launch_browser = roop.globals.CFG.launch_browser

        uii.ui_restart_server = False
        try:
            # prevent_thread_lock lets this thread continue into the wait loop below
            ui.queue().launch(inbrowser=launch_browser, server_name=server_name, server_port=server_port, share=roop.globals.CFG.server_share, ssl_verify=ssl_verify, prevent_thread_lock=True, show_error=True)
        except Exception as e:
            print(f'Exception {e} when launching Gradio Server!')
            uii.ui_restart_server = True
            run_server = False
        try:
            # idle until another tab requests a restart or the user interrupts
            while uii.ui_restart_server == False:
                time.sleep(1.0)

        except (KeyboardInterrupt, OSError):
            print("Keyboard interruption in main thread... closing server.")
            run_server = False
        ui.close()
92
+
93
+
94
def show_msg(msg: str):
    """Display *msg* as a gradio info popup; registered via set_display_ui()."""
    gr.Info(msg)
96
+
ui/tabs/extras_tab.py ADDED
@@ -0,0 +1,245 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import gradio as gr
3
+ import shutil
4
+ import roop.utilities as util
5
+ import roop.util_ffmpeg as ffmpeg
6
+ import roop.globals
7
+ from roop.utilities import clean_dir
8
+
9
# UI label -> frame-processor options ({processor_name: {"subtype": variant}})
# consumed by on_frame_process() / batch_process_with_options().
frame_filters_map = {
    "Colorize B/W Images (Deoldify Artistic)" : {"colorizer" : {"subtype": "deoldify_artistic"}},
    "Colorize B/W Images (Deoldify Stable)" : {"colorizer" : {"subtype": "deoldify_stable"}},
    "Background remove" : {"removebg" : {"subtype": ""}},
    "Filter Stylize" : {"filter_generic" : {"subtype" : "stylize" }},
    "Filter Detail Enhance" : {"filter_generic" : {"subtype" : "detailenhance" }},
    "Filter Pencil Sketch" : {"filter_generic" : {"subtype" : "pencil" }},
    "Filter Cartoon" : {"filter_generic" : {"subtype" : "cartoon" }},
    "Filter C64" : {"filter_generic" : {"subtype" : "C64" }}
}

# UI label -> upscaler options, same structure as frame_filters_map.
frame_upscalers_map = {
    "ESRGAN x2" : {"upscale" : {"subtype": "esrganx2"}},
    "ESRGAN x4" : {"upscale" : {"subtype": "esrganx4"}},
    "LSDIR x4" : {"upscale" : {"subtype": "lsdirx4"}}
}
25
+
26
def extras_tab():
    """Build the 'Extras' tab: video cutting/joining/repair, frame extraction,
    video/gif creation and full-frame filter/upscale processing."""
    # dropdown choices are the map keys plus a leading "None" entry
    filternames = ["None"]
    for f in frame_filters_map.keys():
        filternames.append(f)
    upscalernames = ["None"]
    for f in frame_upscalers_map.keys():
        upscalernames.append(f)

    with gr.Tab("🎉 Extras"):
        with gr.Row():
            files_to_process = gr.Files(label='File(s) to process', file_count="multiple", file_types=["image", "video"])
        # --- video/gif utilities -------------------------------------------
        with gr.Row(variant='panel'):
            with gr.Accordion(label="Video/GIF", open=False):
                with gr.Row(variant='panel'):
                    with gr.Column():
                        gr.Markdown("""
                        # Poor man's video editor
                        Re-encoding uses your configuration from the Settings Tab.
                        """)
                    with gr.Column():
                        cut_start_time = gr.Slider(0, 1000000, value=0, label="Start Frame", step=1.0, interactive=True)
                    with gr.Column():
                        cut_end_time = gr.Slider(1, 1000000, value=1, label="End Frame", step=1.0, interactive=True)
                    with gr.Column():
                        extras_chk_encode = gr.Checkbox(label='Re-encode videos (necessary for videos with different codecs)', value=False)
                        start_cut_video = gr.Button("Cut video")
                        start_extract_frames = gr.Button("Extract frames")
                        start_join_videos = gr.Button("Join videos")

                with gr.Row(variant='panel'):
                    with gr.Column():
                        gr.Markdown("""
                        # Create video/gif from images
                        """)
                    with gr.Column():
                        extras_fps = gr.Slider(minimum=0, maximum=120, value=30, label="Video FPS", step=1.0, interactive=True)
                        extras_images_folder = gr.Textbox(show_label=False, placeholder="/content/", interactive=True)
                    with gr.Column():
                        extras_chk_creategif = gr.Checkbox(label='Create GIF from video', value=False)
                        extras_create_video=gr.Button("Create")
                with gr.Row(variant='panel'):
                    with gr.Column():
                        gr.Markdown("""
                        # Create video from gif
                        """)
                    with gr.Column():
                        extras_video_fps = gr.Slider(minimum=0, maximum=120, value=0, label="Video FPS", step=1.0, interactive=True)
                    with gr.Column():
                        extras_create_video_from_gif=gr.Button("Create")
                with gr.Row(variant='panel'):
                    with gr.Column(scale=2):
                        gr.Markdown("""
                        # Repair video

                        Uses FFMpeg to fix corrupt videos.
                        """)
                    with gr.Column():
                        extras_repair_video=gr.Button("Repair")


        # --- full frame filter/upscale processing --------------------------
        with gr.Row(variant='panel'):
            with gr.Accordion(label="Full frame processing", open=True):
                with gr.Row(variant='panel'):
                    filterselection = gr.Dropdown(filternames, value="None", label="Colorizer/FilterFX", interactive=True)
                    upscalerselection = gr.Dropdown(upscalernames, value="None", label="Enhancer", interactive=True)
                with gr.Row(variant='panel'):
                    start_frame_process=gr.Button("Start processing")

        with gr.Row():
            gr.Button("👀 Open Output Folder", size='sm').click(fn=lambda: util.open_folder(roop.globals.output_path))
        with gr.Row():
            extra_files_output = gr.Files(label='Resulting output files', file_count="multiple")

        # event wiring: every action feeds its results into extra_files_output
        start_cut_video.click(fn=on_cut_video, inputs=[files_to_process, cut_start_time, cut_end_time, extras_chk_encode], outputs=[extra_files_output])
        start_extract_frames.click(fn=on_extras_extract_frames, inputs=[files_to_process], outputs=[extra_files_output])
        start_join_videos.click(fn=on_join_videos, inputs=[files_to_process, extras_chk_encode], outputs=[extra_files_output])
        extras_create_video.click(fn=on_extras_create_video, inputs=[files_to_process, extras_images_folder, extras_fps, extras_chk_creategif], outputs=[extra_files_output])
        extras_create_video_from_gif.click(fn=on_extras_create_video_from_gif, inputs=[files_to_process, extras_video_fps], outputs=[extra_files_output])
        extras_repair_video.click(fn=on_extras_repair_video, inputs=[files_to_process], outputs=[extra_files_output])
        start_frame_process.click(fn=on_frame_process, inputs=[files_to_process, filterselection, upscalerselection], outputs=[extra_files_output])
106
+
107
+
108
def on_cut_video(files, cut_start_frame, cut_end_frame, reencode):
    """Cut each uploaded video to [cut_start_frame, cut_end_frame].

    Results are written next to the output path with a '_cut' suffix.
    Returns the list of successfully produced files, or None when no
    files were supplied.
    """
    if files is None:
        return None

    resultfiles = []
    for tf in files:
        f = tf.name
        destfile = util.get_destfilename_from_path(f, roop.globals.output_path, '_cut')
        ffmpeg.cut_video(f, destfile, cut_start_frame, cut_end_frame, reencode)
        if os.path.isfile(destfile):
            resultfiles.append(destfile)
        else:
            # Bug fix: gr.Error is an exception class - instantiating it without
            # raising (as the original did) displayed nothing. gr.Warning shows
            # the failure while the remaining files still get processed.
            gr.Warning('Cutting video failed!')
    return resultfiles
122
+
123
+
124
def on_join_videos(files, chk_encode):
    """Concatenate the uploaded videos (sorted by filename) into one file.

    The destination name derives from the first file plus a '_join' suffix.
    Returns a one-element list with the joined file, or an empty list on
    failure; None when no files were supplied.
    """
    if files is None:
        return None

    filenames = []
    for f in files:
        filenames.append(f.name)
    destfile = util.get_destfilename_from_path(filenames[0], roop.globals.output_path, '_join')
    # join in a stable, path-independent order
    sorted_filenames = util.sort_filenames_ignore_path(filenames)
    ffmpeg.join_videos(sorted_filenames, destfile, not chk_encode)
    resultfiles = []
    if os.path.isfile(destfile):
        resultfiles.append(destfile)
    else:
        # Bug fix: gr.Error was constructed but never raised, so the user saw
        # nothing on failure. gr.Warning actually displays the message.
        gr.Warning('Joining videos failed!')
    return resultfiles
140
+
141
def on_extras_create_video_from_gif(files, fps):
    """Convert the first uploaded gif into a video in the output folder.

    NOTE(review): the *fps* argument is accepted (wired from the UI slider)
    but not forwarded to ffmpeg here - confirm whether that is intended.
    """
    if files is None:
        return None

    source_names = [f.name for f in files]
    destfilename = os.path.join(roop.globals.output_path, "img2video." + roop.globals.CFG.output_video_format)
    # only the first selected gif is converted
    ffmpeg.create_video_from_gif(source_names[0], destfilename)
    return [destfilename] if os.path.isfile(destfilename) else []
155
+
156
+
157
def on_extras_repair_video(files):
    """Run ffmpeg's repair pass over each uploaded video.

    Repaired files get a '_repair' suffix in the output folder. Returns the
    list of files that were produced, or None when nothing was supplied.
    """
    if files is None:
        return None

    resultfiles = []
    for tf in files:
        f = tf.name
        destfile = util.get_destfilename_from_path(f, roop.globals.output_path, '_repair')
        ffmpeg.repair_video(f, destfile)
        if os.path.isfile(destfile):
            resultfiles.append(destfile)
        else:
            # Bug fix: gr.Error was instantiated without being raised - a no-op.
            # gr.Warning displays the failure and lets the loop continue.
            gr.Warning('Repairing video failed!')
    return resultfiles
171
+
172
+
173
+
174
+
175
+
176
def on_extras_create_video(files, images_path, fps, create_gif):
    """Build a video from a folder of frames and optionally a GIF from it.

    If the first uploaded file is already a video and a GIF was requested,
    that video is used directly; otherwise the frames in *images_path* are
    renamed into sequence and encoded at *fps*.
    """
    if images_path is None:
        return None
    resultfiles = []
    # guard files being None before len() - the original crashed here
    if files and len(files) > 0 and util.is_video(files[0]) and create_gif:
        destfilename = files[0]
    else:
        util.sort_rename_frames(os.path.dirname(images_path))
        destfilename = os.path.join(roop.globals.output_path, "img2video." + roop.globals.CFG.output_video_format)
        ffmpeg.create_video('', destfilename, fps, images_path)
        if os.path.isfile(destfilename):
            resultfiles.append(destfilename)
        else:
            return None
    if create_gif:
        gifname = util.get_destfilename_from_path(destfilename, './output', '.gif')
        ffmpeg.create_gif_from_video(destfilename, gifname)
        # Bug fix: the original tested os.path.isfile(destfilename) here, which
        # is always True at this point - test the gif that was just created.
        if os.path.isfile(gifname):
            resultfiles.append(gifname)
    return resultfiles
196
+
197
+
198
def on_extras_extract_frames(files):
    """Extract all frames of every uploaded video; return the frame paths."""
    if files is None:
        return None

    extracted = []
    for uploaded in files:
        frames_folder = ffmpeg.extract_frames(uploaded.name)
        for entry in os.listdir(frames_folder):
            frame_path = os.path.join(frames_folder, entry)
            if os.path.isfile(frame_path):
                extracted.append(frame_path)
    return extracted
211
+
212
+
213
def on_frame_process(files, filterselection, upscaleselection):
    """Run the selected colorizer/filter and/or upscaler over the uploaded files.

    Builds a ProcessOptions from the dropdown selections, batch-processes every
    file and returns all files found in the output folder afterwards.
    """
    import pathlib
    from roop.core import batch_process_with_options
    from roop.ProcessEntry import ProcessEntry
    from roop.ProcessOptions import ProcessOptions
    from ui.main import prepare_environment

    if files is None:
        return None

    if roop.globals.CFG.clear_output:
        clean_dir(roop.globals.output_path)
    prepare_environment()

    list_files_process: list[ProcessEntry] = [ProcessEntry(tf.name, 0, 0, 0) for tf in files]

    # Direct dict membership instead of the original next()-over-keys scans;
    # also avoids shadowing the builtin `filter`.
    processoroptions = {}
    if filterselection in frame_filters_map:
        processoroptions.update(frame_filters_map[filterselection])
    if upscaleselection in frame_upscalers_map:
        processoroptions.update(frame_upscalers_map[upscaleselection])
    options = ProcessOptions(None, processoroptions, 0, 0, "all", 0, None, None, 0, 128, False, False)
    batch_process_with_options(list_files_process, options, None)

    outdir = pathlib.Path(roop.globals.output_path)
    return [str(item) for item in outdir.rglob("*") if item.is_file()]
244
+
245
+
ui/tabs/facemgr_tab.py ADDED
@@ -0,0 +1,187 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import shutil
3
+ import cv2
4
+ import gradio as gr
5
+ import roop.utilities as util
6
+ import roop.globals
7
+ from roop.face_util import extract_face_images
8
+ from roop.capturer import get_video_frame, get_video_frame_total
9
+ from typing import List, Tuple, Optional
10
+ from roop.typing import Frame, Face, FaceSet
11
+
12
# Gallery index of the currently selected face; -1 means nothing selected.
selected_face_index = -1
# Gradio-ready thumbnails shown in the gallery (parallel to `images`).
thumbs = []
# Raw face images backing the thumbnails, used when writing the faceset.
images = []
15
+
16
+
17
def facemgr_tab() -> None:
    """Build the 'Face Management' tab for assembling .fsz faceset files."""
    with gr.Tab("👨‍👩‍👧‍👦 Face Management"):
        with gr.Row():
            gr.Markdown("""
            # Create blending facesets
            Add multiple reference images into a faceset file.
            """)
        with gr.Row():
            videoimagefst = gr.Image(label="Cut face from video frame", height=576, interactive=False, visible=True, format="jpeg")
        with gr.Row():
            frame_num_fst = gr.Slider(1, 1, value=1, label="Frame Number", info='0:00:00', step=1.0, interactive=False)
            fb_cutfromframe = gr.Button("Use faces from this frame", variant='secondary', interactive=False)
        with gr.Row():
            fb_facesetfile = gr.Files(label='Faceset', file_count='single', file_types=['.fsz'], interactive=True)
            fb_files = gr.Files(label='Input Files', file_count="multiple", file_types=["image", "video"], interactive=True)
        with gr.Row():
            with gr.Column():
                gr.Button("👀 Open Output Folder", size='sm').click(fn=lambda: util.open_folder(roop.globals.output_path))
            with gr.Column():
                gr.Markdown(' ')
        with gr.Row():
            faces = gr.Gallery(label="Faces in this Faceset", allow_preview=True, preview=True, height=128, object_fit="scale-down")
        with gr.Row():
            fb_remove = gr.Button("Remove selected", variant='secondary')
            fb_update = gr.Button("Create/Update Faceset file", variant='primary')
            fb_clear = gr.Button("Clear all", variant='stop')

        # event wiring
        fb_facesetfile.change(fn=on_faceset_changed, inputs=[fb_facesetfile], outputs=[faces])
        fb_files.change(fn=on_fb_files_changed, inputs=[fb_files], outputs=[faces, videoimagefst, frame_num_fst, fb_cutfromframe])
        fb_update.click(fn=on_update_clicked, outputs=[fb_facesetfile])
        fb_remove.click(fn=on_remove_clicked, outputs=[faces])
        fb_clear.click(fn=on_clear_clicked, outputs=[faces, fb_files, fb_facesetfile])
        fb_cutfromframe.click(fn=on_cutfromframe_clicked, inputs=[fb_files, frame_num_fst], outputs=[faces])
        frame_num_fst.release(fn=on_frame_num_fst_changed, inputs=[fb_files, frame_num_fst], outputs=[videoimagefst])
        faces.select(fn=on_face_selected)
52
+
53
+
54
def on_faceset_changed(faceset, progress=gr.Progress()) -> List[Frame]:
    """Load an uploaded .fsz faceset and rebuild the thumbnail gallery.

    Unzips the archive into a temp subfolder, runs face extraction on every
    contained .png and returns the updated thumbnail list.
    """
    global thumbs, images

    if faceset is None:
        return thumbs

    # NOTE: only `thumbs` is cleared here; `images` keeps accumulating -
    # presumably intentional for blending, but verify against on_update_clicked.
    thumbs.clear()
    filename = faceset.name

    if filename.lower().endswith('fsz'):
        progress(0, desc="Retrieving faces from Faceset File", )
        unzipfolder = os.path.join(os.environ["TEMP"], 'faceset')
        # start from a clean extraction folder
        if os.path.isdir(unzipfolder):
            shutil.rmtree(unzipfolder)
        util.mkdir_with_umask(unzipfolder)
        util.unzip(filename, unzipfolder)
        for file in os.listdir(unzipfolder):
            if file.endswith(".png"):
                SELECTION_FACES_DATA = extract_face_images(os.path.join(unzipfolder,file), (False, 0), 0.5)
                if len(SELECTION_FACES_DATA) < 1:
                    gr.Warning(f"No face detected in {file}!")
                for f in SELECTION_FACES_DATA:
                    image = f[1]
                    images.append(image)
                    thumbs.append(util.convert_to_gradio(image))

    return thumbs
81
+
82
+
83
def on_fb_files_changed(inputfiles, progress=gr.Progress()) -> Tuple[List[Frame], Optional[gr.Image], Optional[gr.Slider], Optional[gr.Button]]:
    """Harvest faces from newly selected input files.

    Images are scanned immediately and their faces appended to the gallery;
    for a video/gif only the first frame is displayed and the frame slider
    plus cut button are enabled so the user can pick frames manually.
    """
    global thumbs, images, total_frames, current_video_fps

    if inputfiles is None or len(inputfiles) < 1:
        return thumbs, None, None, None

    progress(0, desc="Retrieving faces from images", )
    slider = None
    video_image = None
    cut_button = None
    for f in inputfiles:
        source_path = f.name
        if util.has_image_extension(source_path):
            # still image: video-frame controls stay disabled
            slider = gr.Slider(interactive=False)
            video_image = gr.Image(interactive=False)
            cut_button = gr.Button(interactive=False)
            roop.globals.source_path = source_path
            SELECTION_FACES_DATA = extract_face_images(roop.globals.source_path, (False, 0), 0.5)
            for f in SELECTION_FACES_DATA:
                image = f[1]
                images.append(image)
                thumbs.append(util.convert_to_gradio(image))
        elif util.is_video(source_path) or source_path.lower().endswith('gif'):
            # video/gif: show frame 1 and enable manual frame selection
            total_frames = get_video_frame_total(source_path)
            current_video_fps = util.detect_fps(source_path)
            cut_button = gr.Button(interactive=True)
            video_image, slider = display_video_frame(source_path, 1, total_frames)

    return thumbs, video_image, slider, cut_button
112
+
113
+
114
def display_video_frame(filename: str, frame_num: int, total: int=0) -> Tuple[gr.Image, gr.Slider]:
    """Render frame *frame_num* of *filename* and a slider whose info text
    shows the frame's timestamp (HH:MM:SS.mmm).

    When *total* > 0 the slider range is set to [1, total].
    """
    global current_video_fps

    frame = get_video_frame(filename, frame_num)
    # guard against division by zero on streams with unknown fps
    if current_video_fps == 0:
        current_video_fps = 1
    elapsed = (frame_num - 1) / current_video_fps
    mins, secs = divmod(elapsed, 60)
    hours, mins = divmod(mins, 60)
    millis = (secs - int(secs)) * 1000
    timeinfo = f"{int(hours):0>2}:{int(mins):0>2}:{int(secs):0>2}.{int(millis):0>3}"
    frame_image = gr.Image(value=util.convert_to_gradio(frame), interactive=True)
    if total > 0:
        return frame_image, gr.Slider(info=timeinfo, minimum=1, maximum=total, interactive=True)
    return frame_image, gr.Slider(info=timeinfo, interactive=True)
130
+
131
+
132
def on_face_selected(evt: gr.SelectData) -> None:
    """Remember which gallery thumbnail the user clicked on."""
    global selected_face_index

    if evt is None:
        return
    selected_face_index = evt.index
137
+
138
def on_frame_num_fst_changed(inputfiles: List[gr.Files], frame_num: int) -> Frame:
    """Show the requested frame of the first uploaded video."""
    first_video = inputfiles[0].name
    frame_image, _ = display_video_frame(first_video, frame_num, 0)
    return frame_image
142
+
143
+
144
def on_cutfromframe_clicked(inputfiles: List[gr.Files], frame_num: int) -> List[Frame]:
    """Extract the faces found in frame *frame_num* of the first uploaded
    video and append them to the gallery."""
    global thumbs

    source = inputfiles[0].name
    for face_data in extract_face_images(source, (True, frame_num), 0.5):
        face_img = face_data[1]
        images.append(face_img)
        thumbs.append(util.convert_to_gradio(face_img))
    return thumbs
154
+
155
+
156
def on_remove_clicked() -> List[Frame]:
    """Remove the currently selected face from the gallery and image list."""
    global thumbs, images, selected_face_index

    # Bug fix: the original guard (len(thumbs) > selected_face_index) accepted
    # the initial -1 sentinel, so clicking Remove with nothing selected
    # silently popped the *last* face. Only act on a valid selection.
    if 0 <= selected_face_index < len(thumbs):
        thumbs.pop(selected_face_index)
        # keep the parallel image list in sync, guarding against length drift
        if selected_face_index < len(images):
            images.pop(selected_face_index)
    return thumbs
165
+
166
def on_clear_clicked() -> Tuple[List[Frame], None, None]:
    """Drop every collected face and reset both file pickers."""
    global thumbs, images

    del thumbs[:]
    del images[:]
    return thumbs, None, None
172
+
173
+
174
def on_update_clicked() -> Optional[str]:
    """Write every collected face image to the output folder and zip them
    into faceset.fsz; return the archive path (None when no faces exist)."""
    if len(images) < 1:
        # plain string - the original used an f-string with no placeholders
        gr.Warning("No faces to create faceset from!")
        return None

    imgnames = []
    for index, img in enumerate(images):
        filename = os.path.join(roop.globals.output_path, f'{index}.png')
        cv2.imwrite(filename, img)
        imgnames.append(filename)

    finalzip = os.path.join(roop.globals.output_path, 'faceset.fsz')
    util.zip(imgnames, finalzip)
    return finalzip
ui/tabs/faceswap_tab.py ADDED
@@ -0,0 +1,835 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import shutil
3
+ import pathlib
4
+ import gradio as gr
5
+ import roop.utilities as util
6
+ import roop.globals
7
+ import ui.globals
8
+ from roop.face_util import extract_face_images, create_blank_image
9
+ from roop.capturer import get_video_frame, get_video_frame_total, get_image_frame
10
+ from roop.ProcessEntry import ProcessEntry
11
+ from roop.ProcessOptions import ProcessOptions
12
+ from roop.FaceSet import FaceSet
13
+ from roop.utilities import clean_dir
14
+
15
# Last rendered preview image.
last_image = None


# Whether face selection currently targets the input (True) or target gallery.
IS_INPUT = True
SELECTED_FACE_INDEX = 0

# Indices of the faces selected in the input/target galleries.
SELECTED_INPUT_FACE_INDEX = 0
SELECTED_TARGET_FACE_INDEX = 0

# Gradio components, assigned when faceswap_tab() builds the UI.
input_faces = None
target_faces = None
face_selection = None
previewimage = None

selected_preview_index = 0

# True while a swap job is running.
is_processing = False

# Files queued for processing.
list_files_process : list[ProcessEntry] = []
model_swap_choices = ["InSwapper 128", "ReSwapper 128", "ReSwapper 256"]

# Dropdown choice lists for the tab.
no_face_choices = ["Use untouched original frame","Retry rotated", "Skip Frame", "Skip Frame if no similar face", "Use last swapped"]
swap_choices = ["First found", "All input faces", "All input faces (random)", "All female", "All male", "All faces", "Selected face"]

# Fallback fps until a video is analyzed.
current_video_fps = 50

# Whether the manual mask editor is currently shown.
manual_masking = False
42
+
43
+
44
+ def faceswap_tab():
45
+ global no_face_choices, previewimage
46
+
47
+ with gr.Tab("🎭 Face Swap"):
48
+ with gr.Row(variant='panel'):
49
+ with gr.Column(scale=2):
50
+ with gr.Row():
51
+ input_faces = gr.Gallery(label="Input faces gallery", allow_preview=False, preview=False, height=138, columns=64, object_fit="scale-down", interactive=False)
52
+ target_faces = gr.Gallery(label="Target faces gallery", allow_preview=False, preview=False, height=138, columns=64, object_fit="scale-down", interactive=False)
53
+ with gr.Row():
54
+ bt_move_left_input = gr.Button("⬅ Move left", size='sm')
55
+ bt_move_right_input = gr.Button("➡ Move right", size='sm')
56
+ bt_move_left_target = gr.Button("⬅ Move left", size='sm')
57
+ bt_move_right_target = gr.Button("➡ Move right", size='sm')
58
+ with gr.Row():
59
+ bt_remove_selected_input_face = gr.Button("❌ Remove selected", size='sm')
60
+ bt_clear_input_faces = gr.Button("💥 Clear all", variant='stop', size='sm')
61
+ bt_remove_selected_target_face = gr.Button("❌ Remove selected", size='sm')
62
+ bt_add_local = gr.Button('Add local files from', size='sm')
63
+
64
+ with gr.Row():
65
+ with gr.Column(scale=2):
66
+ with gr.Accordion(label="Advanced Masking", open=False):
67
+ chk_showmaskoffsets = gr.Checkbox(
68
+ label="Show mask overlay in preview",
69
+ value=False,
70
+ interactive=True,
71
+ )
72
+ chk_restoreoriginalmouth = gr.Checkbox(
73
+ label="Restore original mouth area",
74
+ value=False,
75
+ interactive=True,
76
+ )
77
+ mask_top = gr.Slider(
78
+ 0,
79
+ 1.0,
80
+ value=0,
81
+ label="Offset Face Top",
82
+ step=0.01,
83
+ interactive=True,
84
+ )
85
+ mask_bottom = gr.Slider(
86
+ 0,
87
+ 1.0,
88
+ value=0,
89
+ label="Offset Face Bottom",
90
+ step=0.01,
91
+ interactive=True,
92
+ )
93
+ mask_left = gr.Slider(
94
+ 0,
95
+ 1.0,
96
+ value=0,
97
+ label="Offset Face Left",
98
+ step=0.01,
99
+ interactive=True,
100
+ )
101
+ mask_right = gr.Slider(
102
+ 0,
103
+ 1.0,
104
+ value=0,
105
+ label="Offset Face Right",
106
+ step=0.01,
107
+ interactive=True,
108
+ )
109
+ mask_erosion = gr.Slider(
110
+ 1.0,
111
+ 3.0,
112
+ value=1.0,
113
+ label="Erosion Iterations",
114
+ step=1.00,
115
+ interactive=True,
116
+ )
117
+ mask_blur = gr.Slider(
118
+ 10.0,
119
+ 50.0,
120
+ value=20.0,
121
+ label="Blur size",
122
+ step=1.00,
123
+ interactive=True,
124
+ )
125
+ bt_toggle_masking = gr.Button(
126
+ "Toggle manual masking", variant="secondary", size="sm"
127
+ )
128
+ selected_mask_engine = gr.Dropdown(
129
+ ["None", "Clip2Seg", "DFL XSeg"],
130
+ value="None",
131
+ label="Face masking engine",
132
+ )
133
+ clip_text = gr.Textbox(
134
+ label="List of objects to mask and restore back on fake face",
135
+ value="cup,hands,hair,banana",
136
+ interactive=False,
137
+ )
138
+ bt_preview_mask = gr.Button(
139
+ "👥 Show Mask Preview", variant="secondary"
140
+ )
141
+ with gr.Column(scale=2):
142
+ local_folder = gr.Textbox(show_label=False, placeholder="/content/", interactive=True)
143
+ with gr.Row(variant='panel'):
144
+ bt_srcfiles = gr.Files(label='Source Images or Facesets', file_count="multiple", file_types=["image", ".fsz", ".webp"], elem_id='filelist', height=233)
145
+ bt_destfiles = gr.Files(label='Target File(s)', file_count="multiple", file_types=["image", "video", ".webp"], elem_id='filelist', height=233)
146
+ with gr.Row(variant='panel'):
147
+ ui.globals.ui_selected_swap_model = gr.Dropdown(model_swap_choices, value=model_swap_choices[0], label="Specify Face Swap Model")
148
+ forced_fps = gr.Slider(minimum=0, maximum=120, value=0, label="Video FPS", info='Overrides detected fps if not 0', step=1.0, interactive=True, container=True)
149
+
150
+ with gr.Column(scale=2):
151
+ previewimage = gr.Image(label="Preview Image", height=576, interactive=False, visible=True, format=get_gradio_output_format())
152
+ maskimage = gr.ImageEditor(label="Manual mask Image", sources=["clipboard"], transforms="", type="numpy",
153
+ brush=gr.Brush(color_mode="fixed", colors=["rgba(255, 255, 255, 1"]), interactive=True, visible=False)
154
+ with gr.Row(variant='panel'):
155
+ fake_preview = gr.Checkbox(label="Face swap frames", value=False)
156
+ bt_refresh_preview = gr.Button("🔄 Refresh", variant='secondary', size='sm')
157
+ bt_use_face_from_preview = gr.Button("Use Face from this Frame", variant='primary', size='sm')
158
+ with gr.Row():
159
+ preview_frame_num = gr.Slider(1, 1, value=1, label="Frame Number", info='0:00:00', step=1.0, interactive=True)
160
+ with gr.Row():
161
+ text_frame_clip = gr.Markdown('Processing frame range [0 - 0]')
162
+ set_frame_start = gr.Button("⬅ Set as Start", size='sm')
163
+ set_frame_end = gr.Button("➡ Set as End", size='sm')
164
+ with gr.Row(visible=False) as dynamic_face_selection:
165
+ with gr.Column(scale=2):
166
+ face_selection = gr.Gallery(label="Detected faces", allow_preview=False, preview=False, height=138, object_fit="cover", columns=32)
167
+ with gr.Column():
168
+ bt_faceselect = gr.Button("☑ Use selected face", size='sm')
169
+ bt_cancelfaceselect = gr.Button("Done", size='sm')
170
+ with gr.Column():
171
+ gr.Markdown(' ')
172
+
173
+ with gr.Row(variant='panel'):
174
+ with gr.Column(scale=1):
175
+ selected_face_detection = gr.Dropdown(swap_choices, value="First found", label="Specify face selection for swapping")
176
+ with gr.Column(scale=1):
177
+ num_swap_steps = gr.Slider(1, 5, value=1, step=1.0, label="Number of swapping steps", info="More steps may increase likeness")
178
+ with gr.Column(scale=2):
179
+ ui.globals.ui_selected_enhancer = gr.Dropdown(["None", "Codeformer", "DMDNet", "GFPGAN", "GPEN", "Restoreformer++"], value="None", label="Select post-processing")
180
+
181
+ with gr.Row(variant='panel'):
182
+ with gr.Column(scale=1):
183
+ max_face_distance = gr.Slider(0.01, 1.0, value=0.65, label="Max Face Similarity Threshold", info="0.0 = identical 1.0 = no similarity")
184
+ with gr.Column(scale=1):
185
+ ui.globals.ui_upscale = gr.Dropdown(["128px", "256px", "512px"], value="128px", label="Subsample upscale to", interactive=True)
186
+ with gr.Column(scale=2):
187
+ ui.globals.ui_blend_ratio = gr.Slider(0.0, 1.0, value=0.65, label="Original/Enhanced image blend ratio", info="Only used with active post-processing")
188
+
189
+ with gr.Row(variant='panel'):
190
+ with gr.Column(scale=1):
191
+ video_swapping_method = gr.Dropdown(["Extract Frames to media","In-Memory processing"], value="In-Memory processing", label="Select video processing method", interactive=True)
192
+ no_face_action = gr.Dropdown(choices=no_face_choices, value=no_face_choices[0], label="Action on no face detected", interactive=True)
193
+ vr_mode = gr.Checkbox(label="VR Mode", value=False)
194
+ with gr.Column(scale=1):
195
+ with gr.Group():
196
+ autorotate = gr.Checkbox(label="Auto rotate horizontal Faces", value=True)
197
+ roop.globals.skip_audio = gr.Checkbox(label="Skip audio", value=False)
198
+ roop.globals.keep_frames = gr.Checkbox(label="Keep Frames (relevant only when extracting frames)", value=False)
199
+ roop.globals.wait_after_extraction = gr.Checkbox(label="Wait for user key press before creating video ", value=False)
200
+
201
+ with gr.Row(variant='panel'):
202
+ with gr.Column():
203
+ bt_start = gr.Button("▶ Start", variant='primary')
204
+ with gr.Column():
205
+ bt_stop = gr.Button("⏹ Stop", variant='secondary', interactive=False)
206
+ gr.Button("👀 Open Output Folder", size='sm').click(fn=lambda: util.open_folder(roop.globals.output_path))
207
+ with gr.Column(scale=2):
208
+ output_method = gr.Dropdown(["File","Virtual Camera", "Both"], value="File", label="Select Output Method", interactive=True)
209
+ with gr.Row(variant='panel'):
210
+ with gr.Column():
211
+ resultfiles = gr.Files(label='Processed File(s)', interactive=False)
212
+ with gr.Column():
213
+ resultimage = gr.Image(type='filepath', label='Final Image', interactive=False )
214
+ resultvideo = gr.Video(label='Final Video', interactive=False, visible=False)
215
+
216
+ previewinputs = [ui.globals.ui_selected_swap_model, preview_frame_num, bt_destfiles, fake_preview, ui.globals.ui_selected_enhancer, selected_face_detection,
217
+ max_face_distance, ui.globals.ui_blend_ratio, selected_mask_engine, clip_text, no_face_action, vr_mode, autorotate, maskimage, chk_showmaskoffsets, chk_restoreoriginalmouth, num_swap_steps, ui.globals.ui_upscale]
218
+ previewoutputs = [previewimage, maskimage, preview_frame_num]
219
+ input_faces.select(on_select_input_face, None, None).success(fn=on_preview_frame_changed, inputs=previewinputs, outputs=previewoutputs)
220
+
221
+ bt_move_left_input.click(fn=move_selected_input, inputs=[bt_move_left_input], outputs=[input_faces])
222
+ bt_move_right_input.click(fn=move_selected_input, inputs=[bt_move_right_input], outputs=[input_faces])
223
+ bt_move_left_target.click(fn=move_selected_target, inputs=[bt_move_left_target], outputs=[target_faces])
224
+ bt_move_right_target.click(fn=move_selected_target, inputs=[bt_move_right_target], outputs=[target_faces])
225
+
226
+ bt_remove_selected_input_face.click(fn=remove_selected_input_face, outputs=[input_faces])
227
+ bt_srcfiles.change(fn=on_srcfile_changed, show_progress='full', inputs=bt_srcfiles, outputs=[dynamic_face_selection, face_selection, input_faces, bt_srcfiles])
228
+
229
+ mask_top.release(fn=on_mask_top_changed, inputs=[mask_top], show_progress='hidden')
230
+ mask_bottom.release(fn=on_mask_bottom_changed, inputs=[mask_bottom], show_progress='hidden')
231
+ mask_left.release(fn=on_mask_left_changed, inputs=[mask_left], show_progress='hidden')
232
+ mask_right.release(fn=on_mask_right_changed, inputs=[mask_right], show_progress='hidden')
233
+ mask_erosion.release(fn=on_mask_erosion_changed, inputs=[mask_erosion], show_progress='hidden')
234
+ mask_blur.release(fn=on_mask_blur_changed, inputs=[mask_blur], show_progress='hidden')
235
+ selected_mask_engine.change(fn=on_mask_engine_changed, inputs=[selected_mask_engine], outputs=[clip_text], show_progress='hidden')
236
+
237
+ target_faces.select(on_select_target_face, None, None)
238
+ bt_remove_selected_target_face.click(fn=remove_selected_target_face, outputs=[target_faces])
239
+
240
+ forced_fps.change(fn=on_fps_changed, inputs=[forced_fps], show_progress='hidden')
241
+ bt_destfiles.change(fn=on_destfiles_changed, inputs=[bt_destfiles], outputs=[preview_frame_num, text_frame_clip], show_progress='hidden').success(fn=on_preview_frame_changed, inputs=previewinputs, outputs=previewoutputs, show_progress='hidden')
242
+ bt_destfiles.select(fn=on_destfiles_selected, outputs=[preview_frame_num, text_frame_clip, forced_fps], show_progress='hidden').success(fn=on_preview_frame_changed, inputs=previewinputs, outputs=previewoutputs, show_progress='hidden')
243
+ bt_destfiles.clear(fn=on_clear_destfiles, outputs=[target_faces, selected_face_detection])
244
+ resultfiles.select(fn=on_resultfiles_selected, inputs=[resultfiles], outputs=[resultimage, resultvideo])
245
+
246
+ face_selection.select(on_select_face, None, None)
247
+ bt_faceselect.click(fn=on_selected_face, outputs=[input_faces, target_faces, selected_face_detection])
248
+ bt_cancelfaceselect.click(fn=on_end_face_selection, outputs=[dynamic_face_selection, face_selection])
249
+
250
+ bt_clear_input_faces.click(fn=on_clear_input_faces, outputs=[input_faces])
251
+
252
+ bt_add_local.click(fn=on_add_local_folder, inputs=[local_folder], outputs=[bt_destfiles])
253
+ bt_preview_mask.click(fn=on_preview_mask, inputs=[ui.globals.ui_selected_swap_model, preview_frame_num, bt_destfiles, clip_text, selected_mask_engine], outputs=[previewimage])
254
+
255
+ start_event = bt_start.click(fn=start_swap,
256
+ inputs=[ui.globals.ui_selected_swap_model, output_method, ui.globals.ui_selected_enhancer, selected_face_detection, roop.globals.keep_frames, roop.globals.wait_after_extraction,
257
+ roop.globals.skip_audio, max_face_distance, ui.globals.ui_blend_ratio, selected_mask_engine, clip_text,video_swapping_method, no_face_action, vr_mode, autorotate, chk_restoreoriginalmouth, num_swap_steps, ui.globals.ui_upscale, maskimage],
258
+ outputs=[bt_start, bt_stop, resultfiles], show_progress='full')
259
+ after_swap_event = start_event.success(fn=on_resultfiles_finished, inputs=[resultfiles], outputs=[resultimage, resultvideo])
260
+
261
+ bt_stop.click(fn=stop_swap, cancels=[start_event, after_swap_event], outputs=[bt_start, bt_stop], queue=False)
262
+
263
+ bt_refresh_preview.click(fn=on_preview_frame_changed, inputs=previewinputs, outputs=previewoutputs)
264
+ bt_toggle_masking.click(fn=on_toggle_masking, inputs=[previewimage, maskimage], outputs=[previewimage, maskimage])
265
+ fake_preview.change(fn=on_preview_frame_changed, inputs=previewinputs, outputs=previewoutputs)
266
+ preview_frame_num.release(fn=on_preview_frame_changed, inputs=previewinputs, outputs=previewoutputs, show_progress='hidden', )
267
+ bt_use_face_from_preview.click(fn=on_use_face_from_selected, show_progress='full', inputs=[bt_destfiles, preview_frame_num], outputs=[dynamic_face_selection, face_selection, target_faces, selected_face_detection])
268
+ set_frame_start.click(fn=on_set_frame, inputs=[set_frame_start, preview_frame_num], outputs=[text_frame_clip])
269
+ set_frame_end.click(fn=on_set_frame, inputs=[set_frame_end, preview_frame_num], outputs=[text_frame_clip])
270
+
271
+
272
def on_mask_top_changed(mask_offset):
    """Slider release handler: store the top mask offset (slot 0)."""
    return set_mask_offset(0, mask_offset)


def on_mask_bottom_changed(mask_offset):
    """Slider release handler: store the bottom mask offset (slot 1)."""
    return set_mask_offset(1, mask_offset)


def on_mask_left_changed(mask_offset):
    """Slider release handler: store the left mask offset (slot 2)."""
    return set_mask_offset(2, mask_offset)


def on_mask_right_changed(mask_offset):
    """Slider release handler: store the right mask offset (slot 3)."""
    return set_mask_offset(3, mask_offset)


def on_mask_erosion_changed(mask_offset):
    """Slider release handler: store the mask erosion value (slot 4)."""
    return set_mask_offset(4, mask_offset)


def on_mask_blur_changed(mask_offset):
    """Slider release handler: store the mask blur value (slot 5)."""
    return set_mask_offset(5, mask_offset)
288
+
289
+
290
def set_mask_offset(index, mask_offset):
    """Update one component of the selected input face's mask offsets.

    index: slot in the offsets sequence (0=top, 1=bottom, 2=left, 3=right,
        4=erosion, 5=blur -- matching the on_mask_*_changed handlers).
    mask_offset: new value coming from the UI slider.

    Clamps so that top+bottom and left+right never exceed 0.99, as before.
    """
    global SELECTED_INPUT_FACE_INDEX

    if len(roop.globals.INPUT_FACESETS) > SELECTED_INPUT_FACE_INDEX:
        # Copy into a mutable list first: mask_offsets is assigned as a
        # tuple elsewhere in this file (e.g. (0,0,0,0,1,20) in
        # on_srcfile_changed), and item assignment on a tuple raises
        # TypeError. Working on a list fixes that latent crash; indexing
        # consumers keep working since lists index the same way.
        offs = list(roop.globals.INPUT_FACESETS[SELECTED_INPUT_FACE_INDEX].faces[0].mask_offsets)
        offs[index] = mask_offset
        if offs[0] + offs[1] > 0.99:
            offs[0] = 0.99
            offs[1] = 0.0
        if offs[2] + offs[3] > 0.99:
            offs[2] = 0.99
            offs[3] = 0.0
        roop.globals.INPUT_FACESETS[SELECTED_INPUT_FACE_INDEX].faces[0].mask_offsets = offs
303
+
304
def on_mask_engine_changed(mask_engine):
    """Enable the clip-text box only while the Clip2Seg engine is active."""
    editable = mask_engine == "Clip2Seg"
    return gr.Textbox(interactive=editable)
308
+
309
+
310
def on_add_local_folder(folder):
    """Collect files from a local folder for the target-file control.

    Warns when the helper reports no folder content (None); the value is
    returned either way so gradio updates the Files component.
    """
    found = util.get_local_files_from_folder(folder)
    if found is None:
        gr.Warning("Empty folder or folder not found!")
    return found
315
+
316
+
317
def on_srcfile_changed(srcfiles, progress=gr.Progress()):
    """Handle newly uploaded source files (input faces).

    Accepts plain images and `.fsz` faceset archives. Every detected face is
    appended to roop.globals.INPUT_FACESETS and one thumbnail per
    image/faceset is appended to ui.globals.ui_input_thumbs.

    Returns gradio updates: (face-selection column, selection gallery,
    input thumbnails, cleared upload control).
    """
    global SELECTION_FACES_DATA, IS_INPUT, input_faces, face_selection, last_image

    # Mark that a subsequent gallery pick (on_selected_face) targets the
    # INPUT face list, not the target faces.
    IS_INPUT = True

    if srcfiles is None or len(srcfiles) < 1:
        return gr.Column(visible=False), None, ui.globals.ui_input_thumbs, None

    for f in srcfiles:
        source_path = f.name
        if source_path.lower().endswith('fsz'):
            # Faceset archive: unpack into a scratch dir and load every face.
            progress(0, desc="Retrieving faces from Faceset File")
            unzipfolder = os.path.join(os.environ["TEMP"], 'faceset')
            if os.path.isdir(unzipfolder):
                # reuse the scratch dir but drop leftovers from a previous run
                files = os.listdir(unzipfolder)
                for file in files:
                    os.remove(os.path.join(unzipfolder, file))
            else:
                os.makedirs(unzipfolder)
            util.mkdir_with_umask(unzipfolder)
            util.unzip(source_path, unzipfolder)
            is_first = True
            face_set = FaceSet()
            for file in os.listdir(unzipfolder):
                if file.endswith(".png"):
                    filename = os.path.join(unzipfolder,file)
                    progress(0, desc="Extracting faceset")
                    SELECTION_FACES_DATA = extract_face_images(filename, (False, 0))
                    # NOTE(review): this inner loop rebinds `f`, shadowing the
                    # srcfiles loop variable; harmless here only because
                    # source_path was captured above.
                    for f in SELECTION_FACES_DATA:
                        face = f[0]
                        # default offsets: top/bottom/left/right=0, then 1, 20
                        # -- presumably erosion and blur; TODO confirm slots
                        face.mask_offsets = (0,0,0,0,1,20)
                        face_set.faces.append(face)
                        if is_first:
                            # only the first face of the whole set gets a thumbnail
                            image = util.convert_to_gradio(f[1])
                            ui.globals.ui_input_thumbs.append(image)
                            is_first = False
                        face_set.ref_images.append(get_image_frame(filename))
            if len(face_set.faces) > 0:
                if len(face_set.faces) > 1:
                    # fold all faces of the set into one averaged embedding
                    face_set.AverageEmbeddings()
                roop.globals.INPUT_FACESETS.append(face_set)

        elif util.has_image_extension(source_path):
            # Plain image: each detected face becomes its own FaceSet.
            progress(0, desc="Retrieving faces from image")
            roop.globals.source_path = source_path
            SELECTION_FACES_DATA = extract_face_images(roop.globals.source_path, (False, 0))
            progress(0.5, desc="Retrieving faces from image")
            for f in SELECTION_FACES_DATA:
                face_set = FaceSet()
                face = f[0]
                face.mask_offsets = (0,0,0,0,1,20)
                face_set.faces.append(face)
                image = util.convert_to_gradio(f[1])
                ui.globals.ui_input_thumbs.append(image)
                roop.globals.INPUT_FACESETS.append(face_set)

    progress(1.0)
    return gr.Column(visible=False), None, ui.globals.ui_input_thumbs,None
375
+
376
+
377
def on_select_input_face(evt: gr.SelectData):
    """Gallery select handler: remember which input face is selected."""
    global SELECTED_INPUT_FACE_INDEX
    SELECTED_INPUT_FACE_INDEX = evt.index
381
+
382
+
383
def remove_selected_input_face():
    """Drop the selected input face (faceset and thumbnail).

    Returns the updated thumbnail list so the gallery refreshes.
    """
    global SELECTED_INPUT_FACE_INDEX

    idx = SELECTED_INPUT_FACE_INDEX
    if idx < len(roop.globals.INPUT_FACESETS):
        roop.globals.INPUT_FACESETS.pop(idx)
    if idx < len(ui.globals.ui_input_thumbs):
        ui.globals.ui_input_thumbs.pop(idx)

    return ui.globals.ui_input_thumbs
394
+
395
def move_selected_input(button_text):
    """Move the selected input face one slot left or right.

    button_text selects the direction ("⬅ Move left" moves left, anything
    else moves right). Returns the reordered thumbnail list.
    """
    global SELECTED_INPUT_FACE_INDEX

    idx = SELECTED_INPUT_FACE_INDEX
    if button_text == "⬅ Move left":
        if idx <= 0:
            return ui.globals.ui_input_thumbs
        step = -1
    else:
        if idx >= len(ui.globals.ui_input_thumbs):
            return ui.globals.ui_input_thumbs
        step = 1

    # keep facesets and thumbnails in lockstep
    faceset = roop.globals.INPUT_FACESETS.pop(idx)
    roop.globals.INPUT_FACESETS.insert(idx + step, faceset)
    thumb = ui.globals.ui_input_thumbs.pop(idx)
    ui.globals.ui_input_thumbs.insert(idx + step, thumb)
    return ui.globals.ui_input_thumbs
412
+
413
+
414
def move_selected_target(button_text):
    """Move the selected target face one slot left or right.

    button_text selects the direction ("⬅ Move left" moves left, anything
    else moves right). Returns the reordered thumbnail list.
    """
    global SELECTED_TARGET_FACE_INDEX

    idx = SELECTED_TARGET_FACE_INDEX
    if button_text == "⬅ Move left":
        if idx <= 0:
            return ui.globals.ui_target_thumbs
        step = -1
    else:
        if idx >= len(ui.globals.ui_target_thumbs):
            return ui.globals.ui_target_thumbs
        step = 1

    # keep target faces and thumbnails in lockstep
    face = roop.globals.TARGET_FACES.pop(idx)
    roop.globals.TARGET_FACES.insert(idx + step, face)
    thumb = ui.globals.ui_target_thumbs.pop(idx)
    ui.globals.ui_target_thumbs.insert(idx + step, thumb)
    return ui.globals.ui_target_thumbs
431
+
432
+
433
+
434
+
435
def on_select_target_face(evt: gr.SelectData):
    """Gallery select handler: remember which target face is selected."""
    global SELECTED_TARGET_FACE_INDEX
    SELECTED_TARGET_FACE_INDEX = evt.index
439
+
440
def remove_selected_target_face():
    """Remove the currently selected target face and its thumbnail.

    Returns the updated thumbnail list so the gallery refreshes.
    """
    # Guard each list by ITS OWN length: the original checked the thumbnail
    # list before popping roop.globals.TARGET_FACES, which raises IndexError
    # whenever the two lists get out of sync.
    if len(roop.globals.TARGET_FACES) > SELECTED_TARGET_FACE_INDEX:
        roop.globals.TARGET_FACES.pop(SELECTED_TARGET_FACE_INDEX)
    if len(ui.globals.ui_target_thumbs) > SELECTED_TARGET_FACE_INDEX:
        ui.globals.ui_target_thumbs.pop(SELECTED_TARGET_FACE_INDEX)
    return ui.globals.ui_target_thumbs
448
+
449
+
450
def on_use_face_from_selected(files, frame_num):
    """Extract faces from the currently previewed target file.

    For an image the whole file is scanned; for a video/gif only the frame
    shown in the preview slider (frame_num). A single detected face is
    applied directly as target face; multiple faces open the selection
    gallery instead.

    Returns updates for (dynamic selection row, selection gallery,
    target faces gallery, face-selection dropdown).
    """
    global IS_INPUT, SELECTION_FACES_DATA

    # a subsequent gallery pick (on_selected_face) goes to the TARGET list
    IS_INPUT = False
    thumbs = []

    # selected_preview_index is the module-level index set by
    # on_destfiles_selected.
    roop.globals.target_path = files[selected_preview_index].name
    if util.is_image(roop.globals.target_path) and not roop.globals.target_path.lower().endswith(('gif')):
        SELECTION_FACES_DATA = extract_face_images(roop.globals.target_path, (False, 0))
        if len(SELECTION_FACES_DATA) > 0:
            for f in SELECTION_FACES_DATA:
                image = util.convert_to_gradio(f[1])
                thumbs.append(image)
        else:
            gr.Info('No faces detected!')
            roop.globals.target_path = None

    elif util.is_video(roop.globals.target_path) or roop.globals.target_path.lower().endswith(('gif')):
        selected_frame = frame_num
        # (True, frame) asks the extractor to read that specific video frame
        SELECTION_FACES_DATA = extract_face_images(roop.globals.target_path, (True, selected_frame))
        if len(SELECTION_FACES_DATA) > 0:
            for f in SELECTION_FACES_DATA:
                image = util.convert_to_gradio(f[1])
                thumbs.append(image)
        else:
            gr.Info('No faces detected!')
            roop.globals.target_path = None
    else:
        gr.Info('Unknown image/video type!')
        roop.globals.target_path = None

    if len(thumbs) == 1:
        # exactly one face found: use it immediately, skip the gallery
        roop.globals.TARGET_FACES.append(SELECTION_FACES_DATA[0][0])
        ui.globals.ui_target_thumbs.append(thumbs[0])
        return gr.Row(visible=False), None, ui.globals.ui_target_thumbs, gr.Dropdown(value='Selected face')

    return gr.Row(visible=True), thumbs, gr.Gallery(visible=True), gr.Dropdown(visible=True)
487
+
488
+
489
def on_select_face(evt: gr.SelectData):
    """Remember which face in the detection gallery was clicked."""
    # gr.SelectData is a subclass of EventData
    global SELECTED_FACE_INDEX
    SELECTED_FACE_INDEX = evt.index
492
+
493
+
494
def on_selected_face():
    """Commit the face picked in the detection gallery.

    Depending on IS_INPUT (set by on_srcfile_changed /
    on_use_face_from_selected) the face is added to the input facesets or
    to the target faces. Returns gradio updates for the affected
    galleries/dropdown.
    """
    global IS_INPUT, SELECTED_FACE_INDEX, SELECTION_FACES_DATA

    entry = SELECTION_FACES_DATA[SELECTED_FACE_INDEX]
    thumb = util.convert_to_gradio(entry[1])
    if IS_INPUT:
        face_set = FaceSet()
        entry[0].mask_offsets = (0,0,0,0,1,20)
        face_set.faces.append(entry[0])
        roop.globals.INPUT_FACESETS.append(face_set)
        ui.globals.ui_input_thumbs.append(thumb)
        return ui.globals.ui_input_thumbs, gr.Gallery(visible=True), gr.Dropdown(visible=True)

    roop.globals.TARGET_FACES.append(entry[0])
    ui.globals.ui_target_thumbs.append(thumb)
    return gr.Gallery(visible=True), ui.globals.ui_target_thumbs, gr.Dropdown(value='Selected face')
510
+
511
+ # bt_faceselect.click(fn=on_selected_face, outputs=[dynamic_face_selection, face_selection, input_faces, target_faces])
512
+
513
def on_end_face_selection():
    """Hide the dynamic face-selection column and clear its gallery."""
    return gr.Column(visible=False), None
515
+
516
+
517
def on_preview_frame_changed(swap_model, frame_num, files, fake_preview, enhancer, detection, face_distance, blend_ratio,
                             selected_mask_engine, clip_text, no_face_action, vr_mode, auto_rotate, maskimage, show_face_area, restore_original_mouth, num_steps, upsample):
    """Render the preview for the selected frame.

    When fake_preview is enabled and input faces exist, the frame is run
    through the live swap chain; otherwise the raw frame is shown.

    Returns (preview image update, mask editor update, slider update whose
    info text carries the HH:MM:SS.mmm position of the frame).
    """
    global SELECTED_INPUT_FACE_INDEX, manual_masking, current_video_fps

    # imported lazily; roop.core pulls in the heavy processing stack
    from roop.core import live_swap, get_processing_plugins

    manual_masking = False
    mask_offsets = (0,0,0,0)
    if len(roop.globals.INPUT_FACESETS) > SELECTED_INPUT_FACE_INDEX:
        # make sure the selected face carries mask offsets
        if not hasattr(roop.globals.INPUT_FACESETS[SELECTED_INPUT_FACE_INDEX].faces[0], 'mask_offsets'):
            roop.globals.INPUT_FACESETS[SELECTED_INPUT_FACE_INDEX].faces[0].mask_offsets = mask_offsets
        mask_offsets = roop.globals.INPUT_FACESETS[SELECTED_INPUT_FACE_INDEX].faces[0].mask_offsets

    timeinfo = '0:00:00'
    if files is None or selected_preview_index >= len(files) or frame_num is None:
        return None,None, gr.Slider(info=timeinfo)

    filename = files[selected_preview_index].name
    if util.is_video(filename) or filename.lower().endswith('gif'):
        current_frame = get_video_frame(filename, frame_num)
        if current_video_fps == 0:
            # avoid division by zero when fps detection failed
            current_video_fps = 1
        # frame index -> wall-clock position within the clip
        secs = (frame_num - 1) / current_video_fps
        minutes = secs / 60
        secs = secs % 60
        hours = minutes / 60
        minutes = minutes % 60
        milliseconds = (secs - int(secs)) * 1000
        timeinfo = f"{int(hours):0>2}:{int(minutes):0>2}:{int(secs):0>2}.{int(milliseconds):0>3}"
    else:
        current_frame = get_image_frame(filename)
    if current_frame is None:
        return None, None, gr.Slider(info=timeinfo)

    # user-painted layers from the ImageEditor, if any
    layers = None
    if maskimage is not None:
        layers = maskimage["layers"]

    if not fake_preview or len(roop.globals.INPUT_FACESETS) < 1:
        # no swap requested/possible: show the raw frame
        return gr.Image(value=util.convert_to_gradio(current_frame), visible=True), gr.ImageEditor(visible=False), gr.Slider(info=timeinfo)

    # propagate the UI settings into the processing globals
    roop.globals.face_swap_mode = translate_swap_mode(detection)
    roop.globals.selected_enhancer = enhancer
    roop.globals.distance_threshold = face_distance
    roop.globals.blend_ratio = blend_ratio
    roop.globals.no_face_action = index_of_no_face_action(no_face_action)
    roop.globals.vr_mode = vr_mode
    roop.globals.autorotate_faces = auto_rotate
    # upsample looks like "128px"/"256px"/"512px" -> numeric prefix
    roop.globals.subsample_size = int(upsample[:3])


    mask_engine = map_mask_engine(selected_mask_engine, clip_text)

    roop.globals.execution_threads = roop.globals.CFG.max_threads
    # NOTE(review): only the first painted layer is extracted here, but
    # ProcessOptions below receives the whole `maskimage` dict and `mask`
    # goes unused -- confirm which one live_swap actually expects.
    mask = layers[0] if layers is not None else None
    face_index = SELECTED_INPUT_FACE_INDEX
    if len(roop.globals.INPUT_FACESETS) <= face_index:
        face_index = 0

    options = ProcessOptions(swap_model, get_processing_plugins(mask_engine), roop.globals.distance_threshold, roop.globals.blend_ratio,
                             roop.globals.face_swap_mode, face_index, clip_text, maskimage, num_steps, roop.globals.subsample_size, show_face_area, restore_original_mouth)

    current_frame = live_swap(current_frame, options)
    if current_frame is None:
        return gr.Image(visible=True), None, gr.Slider(info=timeinfo)
    return gr.Image(value=util.convert_to_gradio(current_frame), visible=True), gr.ImageEditor(visible=False), gr.Slider(info=timeinfo)
583
+
584
def map_mask_engine(selected_mask_engine, clip_text):
    """Translate the UI mask-engine label into the internal plugin name.

    Clip2Seg additionally needs a non-empty clip prompt; without one the
    engine is disabled (None). Unknown labels map to None as well.
    """
    if selected_mask_engine == "DFL XSeg":
        return "mask_xseg"
    if selected_mask_engine == "Clip2Seg" and clip_text:
        return "mask_clip2seg"
    return None
594
+
595
+
596
def on_toggle_masking(previewimage, mask):
    """Flip between the plain preview image and the manual mask editor."""
    global manual_masking

    manual_masking = not manual_masking
    if not manual_masking:
        return gr.Image(visible=True), gr.ImageEditor(visible=False)

    layers = mask["layers"]
    if len(layers) == 1:
        # start with a blank drawing layer sized like the preview frame
        layers = [create_blank_image(previewimage.shape[1], previewimage.shape[0])]
    return gr.Image(visible=False), gr.ImageEditor(value={"background": previewimage, "layers": layers, "composite": None}, visible=True)
606
+
607
def gen_processing_text(start, end):
    """Human-readable frame-range label shown next to the clip controls."""
    return 'Processing frame range [{} - {}]'.format(start, end)
609
+
610
def on_set_frame(sender: str, frame_num):
    """Set the start or end frame of the selected clip from the slider.

    sender is the clicked button's label: a label ending in 'start' updates
    the start frame (clamped to the current end), anything else updates the
    end frame (clamped to the current start). Returns the range label text.
    """
    global selected_preview_index, list_files_process

    entry = list_files_process[selected_preview_index]
    if entry.endframe == 0:
        return gen_processing_text(0, 0)

    if sender.lower().endswith('start'):
        entry.startframe = min(frame_num, entry.endframe)
    else:
        entry.endframe = max(frame_num, entry.startframe)

    return gen_processing_text(entry.startframe, entry.endframe)
625
+
626
+
627
def on_preview_mask(swap_model, frame_num, files, clip_text, mask_engine):
    """Render a mask-only preview for the selected frame.

    Runs the processing chain in "all" faces mode with mask visualization
    enabled. Returns the preview image, or None when preconditions fail
    (busy, no files, no prompt, no frame, unresolved mask engine).
    """
    from roop.core import live_swap, get_processing_plugins
    global is_processing

    if is_processing or files is None or selected_preview_index >= len(files) or clip_text is None or frame_num is None:
        return None

    filename = files[selected_preview_index].name
    if util.is_video(filename) or filename.lower().endswith('gif'):
        current_frame = get_video_frame(filename, frame_num)
    else:
        current_frame = get_image_frame(filename)
    if current_frame is None or mask_engine is None:
        return None
    # map UI labels to internal plugin names (mirrors map_mask_engine)
    if mask_engine == "Clip2Seg":
        mask_engine = "mask_clip2seg"
        if clip_text is None or len(clip_text) < 1:
            mask_engine = None
    elif mask_engine == "DFL XSeg":
        mask_engine = "mask_xseg"
    # NOTE(review): the trailing True presumably switches ProcessOptions
    # into mask-preview mode -- confirm against the ProcessOptions ctor.
    options = ProcessOptions(swap_model, get_processing_plugins(mask_engine), roop.globals.distance_threshold, roop.globals.blend_ratio,
                             "all", 0, clip_text, None, 0, 128, False, False, True)

    current_frame = live_swap(current_frame, options)
    return util.convert_to_gradio(current_frame)
653
+
654
+
655
def on_clear_input_faces():
    """Remove every input face and thumbnail; returns the emptied list."""
    roop.globals.INPUT_FACESETS.clear()
    ui.globals.ui_input_thumbs.clear()
    return ui.globals.ui_input_thumbs
659
+
660
def on_clear_destfiles():
    """Reset target faces/thumbnails and the face-selection dropdown."""
    ui.globals.ui_target_thumbs.clear()
    roop.globals.TARGET_FACES.clear()
    return ui.globals.ui_target_thumbs, gr.Dropdown(value="First found")
664
+
665
+
666
def index_of_no_face_action(dropdown_text):
    """Map the 'no face' dropdown label to its index in no_face_choices."""
    # no_face_choices is module-level; `global` is unnecessary for reads
    return no_face_choices.index(dropdown_text)
670
+
671
def translate_swap_mode(dropdown_text):
    """Translate the UI face-selection label into the internal swap mode."""
    modes = {
        "Selected face": "selected",
        "First found": "first",
        "All input faces": "all_input",
        "All input faces (random)": "all_random",
        "All female": "all_female",
        "All male": "all_male",
    }
    # unknown labels fall back to swapping every detected face
    return modes.get(dropdown_text, "all")
686
+
687
+
688
def start_swap( swap_model, output_method, enhancer, detection, keep_frames, wait_after_extraction, skip_audio, face_distance, blend_ratio,
        selected_mask_engine, clip_text, processing_method, no_face_action, vr_mode, autorotate, restore_original_mouth, num_swap_steps, upsample, imagemask, progress=gr.Progress()):
    """Run the batch face-swap over all queued target files.

    Generator handler wired to the Start button: the first yield disables
    Start / enables Stop, the final yield restores the buttons and
    publishes the produced output files.
    """
    from ui.main import prepare_environment
    from roop.core import batch_process_regular
    global is_processing, list_files_process

    if list_files_process is None or len(list_files_process) <= 0:
        # NOTE(review): `return <value>` inside a generator ends iteration
        # without emitting the value, so these button updates never reach
        # the UI -- likely intended as a yield.
        return gr.Button(variant="primary"), None, None

    if roop.globals.CFG.clear_output:
        clean_dir(roop.globals.output_path)

    if not util.is_installed("ffmpeg"):
        msg = "ffmpeg is not installed! No video processing possible."
        gr.Warning(msg)

    prepare_environment()

    # copy UI settings into the processing globals
    roop.globals.selected_enhancer = enhancer
    roop.globals.target_path = None
    roop.globals.distance_threshold = face_distance
    roop.globals.blend_ratio = blend_ratio
    roop.globals.keep_frames = keep_frames
    roop.globals.wait_after_extraction = wait_after_extraction
    roop.globals.skip_audio = skip_audio
    roop.globals.face_swap_mode = translate_swap_mode(detection)
    roop.globals.no_face_action = index_of_no_face_action(no_face_action)
    roop.globals.vr_mode = vr_mode
    roop.globals.autorotate_faces = autorotate
    # upsample looks like "128px"/"256px"/"512px" -> numeric prefix
    roop.globals.subsample_size = int(upsample[:3])
    mask_engine = map_mask_engine(selected_mask_engine, clip_text)

    if roop.globals.face_swap_mode == 'selected':
        if len(roop.globals.TARGET_FACES) < 1:
            gr.Error('No Target Face selected!')
            return gr.Button(variant="primary"), None, None

    is_processing = True
    # disable Start / enable Stop while the batch runs
    yield gr.Button(variant="secondary", interactive=False), gr.Button(variant="primary", interactive=True), None
    roop.globals.execution_threads = roop.globals.CFG.max_threads
    roop.globals.video_encoder = roop.globals.CFG.output_video_codec
    roop.globals.video_quality = roop.globals.CFG.video_quality
    roop.globals.max_memory = roop.globals.CFG.memory_limit if roop.globals.CFG.memory_limit > 0 else None

    batch_process_regular(swap_model, output_method, list_files_process, mask_engine, clip_text, processing_method == "In-Memory processing", imagemask, restore_original_mouth, num_swap_steps, progress, SELECTED_INPUT_FACE_INDEX)
    is_processing = False
    # publish whatever landed in the output folder
    outdir = pathlib.Path(roop.globals.output_path)
    outfiles = [str(item) for item in outdir.rglob("*") if item.is_file()]
    if len(outfiles) > 0:
        yield gr.Button(variant="primary", interactive=True),gr.Button(variant="secondary", interactive=False),gr.Files(value=outfiles)
    else:
        yield gr.Button(variant="primary", interactive=True),gr.Button(variant="secondary", interactive=False),None
740
+
741
+
742
def stop_swap():
    """Stop button handler: signal workers to abort and reset the buttons."""
    roop.globals.processing = False
    gr.Info('Aborting processing - please wait for the remaining threads to be stopped')
    return gr.Button(variant="primary", interactive=True), gr.Button(variant="secondary", interactive=False), None
746
+
747
+
748
def on_fps_changed(fps):
    """Store a user-forced FPS on the currently selected target clip."""
    global selected_preview_index, list_files_process

    if not list_files_process or list_files_process[selected_preview_index].endframe < 1:
        # nothing queued, or the selected entry is not a video
        return
    list_files_process[selected_preview_index].fps = fps
754
+
755
+
756
def on_destfiles_changed(destfiles):
    """React to changes of the target-file upload list.

    Wraps every uploaded file in a ProcessEntry and primes the preview
    slider with the first file's frame count.
    Returns (slider update, frame-range label text).
    """
    global selected_preview_index, list_files_process, current_video_fps

    if destfiles is None or len(destfiles) < 1:
        list_files_process.clear()
        return gr.Slider(value=1, maximum=1, info='0:00:00'), ''

    # NOTE(review): entries are appended without clearing first; if a
    # change event re-delivers already-known files this duplicates entries
    # -- verify against gradio's Files.change payload.
    for f in destfiles:
        list_files_process.append(ProcessEntry(f.name, 0,0, 0))

    selected_preview_index = 0
    idx = selected_preview_index

    filename = list_files_process[idx].filename

    if util.is_video(filename) or filename.lower().endswith('gif'):
        total_frames = get_video_frame_total(filename)
        if total_frames is None or total_frames < 1:
            # fall back to a single frame so the slider stays usable
            total_frames = 1
            gr.Warning(f"Corrupted video (unknown), can't detect number of frames!")
        else:
            current_video_fps = util.detect_fps(filename)
    else:
        total_frames = 1
    list_files_process[idx].endframe = total_frames
    if total_frames > 1:
        return gr.Slider(value=1, maximum=total_frames, info='0:00:00'), gen_processing_text(list_files_process[idx].startframe,list_files_process[idx].endframe)
    return gr.Slider(value=1, maximum=total_frames, info='0:00:00'), ''
784
+
785
+
786
def on_destfiles_selected(evt: gr.SelectData):
    """Switch the preview to the clicked target file.

    Updates the slider range to the file's frame count, remembers its fps
    and initializes its end frame on first selection.
    Returns (slider update, frame-range label text, fps).
    """
    global selected_preview_index, list_files_process, current_video_fps

    if evt is not None:
        selected_preview_index = evt.index
    idx = selected_preview_index
    filename = list_files_process[idx].filename
    fps = list_files_process[idx].fps
    if util.is_video(filename) or filename.lower().endswith('gif'):
        total_frames = get_video_frame_total(filename)
        current_video_fps = util.detect_fps(filename)
        if list_files_process[idx].endframe == 0:
            # first time this file is selected: default to the full clip
            list_files_process[idx].endframe = total_frames
    else:
        total_frames = 1

    if total_frames > 1:
        return gr.Slider(value=list_files_process[idx].startframe, maximum=total_frames, info='0:00:00'), gen_processing_text(list_files_process[idx].startframe,list_files_process[idx].endframe), fps
    return gr.Slider(value=1, maximum=total_frames, info='0:00:00'), gen_processing_text(0,0), fps
805
+
806
+
807
def on_resultfiles_selected(evt: gr.SelectData, files):
    """Show the output file the user clicked in the results list."""
    return display_output(files[evt.index].name)
811
+
812
def on_resultfiles_finished(files):
    """After a run, preview the first produced output file (if any)."""
    if files is None or len(files) < 1:
        return None, None
    return display_output(files[0].name)
819
+
820
+
821
def get_gradio_output_format():
    """Image format for gradio components ('jpg' must be spelled 'jpeg')."""
    fmt = roop.globals.CFG.output_image_format
    return "jpeg" if fmt == "jpg" else fmt
825
+
826
+
827
def display_output(filename):
    """Return (image, video) gradio updates for presenting an output file.

    Videos are shown in the video player when the config allows it;
    otherwise a single frame is rendered into the image component.
    """
    if util.is_video(filename) and roop.globals.CFG.output_show_video:
        return gr.Image(visible=False), gr.Video(visible=True, value=filename)

    # fall back to a still-frame preview
    if util.is_video(filename) or filename.lower().endswith('gif'):
        frame = get_video_frame(filename)
    else:
        frame = get_image_frame(filename)
    return gr.Image(visible=True, value=util.convert_to_gradio(frame)), gr.Video(visible=False)
ui/tabs/livecam_tab.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import roop.globals
3
+ import ui.globals
4
+
5
+
6
+ camera_frame = None
7
+
8
def livecam_tab():
    """Build the 'Live Cam' tab: webcam swap controls and output preview."""
    with gr.Tab("🎥 Live Cam"):
        with gr.Row(variant='panel'):
            gr.Markdown("""
            This feature will allow you to use your physical webcam and apply the selected faces to the stream.
            You can also forward the stream to a virtual camera, which can be used in video calls or streaming software.<br />
            Supported are: v4l2loopback (linux), OBS Virtual Camera (macOS/Windows) and unitycapture (Windows).<br />
            **Please note:** to change the face or any other settings you need to stop and restart a running live cam.
            """)

        with gr.Row(variant='panel'):
            with gr.Column():
                bt_start = gr.Button("▶ Start", variant='primary')
            with gr.Column():
                bt_stop = gr.Button("⏹ Stop", variant='secondary', interactive=False)
            with gr.Column():
                camera_num = gr.Slider(0, 8, value=0, label="Camera Number", step=1.0, interactive=True)
                cb_obs = gr.Checkbox(label="Forward stream to virtual camera", interactive=True)
            with gr.Column():
                dd_reso = gr.Dropdown(choices=["640x480","1280x720", "1920x1080"], value="1280x720", label="Fake Camera Resolution", interactive=True)
                cb_xseg = gr.Checkbox(label="Use DFL Xseg masking", interactive=True, value=True)
                cb_mouthrestore = gr.Checkbox(label="Restore original mouth area", interactive=True, value=False)

        with gr.Row():
            fake_cam_image = gr.Image(label='Fake Camera Output', interactive=False, format="jpeg")

        # Start streams frames via the start_cam generator; the Stop button
        # cancels the running generator event.
        start_event = bt_start.click(fn=start_cam, inputs=[ui.globals.ui_selected_swap_model, cb_obs, cb_xseg, cb_mouthrestore, camera_num, dd_reso, ui.globals.ui_selected_enhancer, ui.globals.ui_blend_ratio, ui.globals.ui_upscale],outputs=[bt_start, bt_stop,fake_cam_image])
        bt_stop.click(fn=stop_swap, cancels=[start_event], outputs=[bt_start, bt_stop], queue=False)
36
+
37
+
38
def start_cam(swap_model, stream_to_obs, use_xseg, use_mouthrestore, cam, reso, enhancer, blend_ratio, upscale):
    """Start the live webcam swap and stream frames to the UI.

    Generator: loops forever yielding the latest processed camera frame
    (ui.globals.ui_camera_frame -- presumably filled by the virtualcam
    worker; confirm in roop.virtualcam) until the gradio event is
    cancelled by the Stop button.
    """
    from roop.virtualcam import start_virtual_cam
    from roop.utilities import convert_to_gradio

    roop.globals.selected_enhancer = enhancer
    roop.globals.blend_ratio = blend_ratio
    # upscale looks like "128px"/"256px"/"512px" -> numeric prefix
    roop.globals.subsample_size = int(upscale[:3])
    start_virtual_cam(swap_model, stream_to_obs, use_xseg, use_mouthrestore, cam, reso)
    while True:
        # endless stream; terminated externally via bt_stop's `cancels=`
        yield gr.Button(interactive=False), gr.Button(interactive=True), convert_to_gradio(ui.globals.ui_camera_frame)
48
+
49
+
50
def stop_swap():
    """Stop the live cam stream and swap the button states back."""
    from roop.virtualcam import stop_virtual_cam

    stop_virtual_cam()
    return gr.Button(interactive=True), gr.Button(interactive=False)
54
+
55
+
56
+
57
+
ui/tabs/settings_tab.py ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import shutil
2
+ import os
3
+ import gradio as gr
4
+ import roop.globals
5
+ import ui.globals
6
+ from roop.utilities import clean_dir
7
+
8
# Themes selectable in the settings UI; "Default" uses gradio's stock theme,
# the rest are hub-hosted themes ("author/name").  Changing the theme needs a
# full server restart (see settings_tab below).
available_themes = ["Default", "gradio/glass", "gradio/monochrome", "gradio/seafoam", "gradio/soft", "gstaff/xkcd", "freddyaboulton/dracula_revamped", "ysharma/steampunk"]
# Choices offered by the output-format/codec dropdowns.
image_formats = ['jpg','png', 'webp']
video_formats = ['avi','mkv', 'mp4', 'webm']
video_codecs = ['libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc']
# Execution providers; filled lazily by settings_tab() via
# roop.core.suggest_execution_providers().
providerlist = None

# Components whose elem_id names a CFG attribute; each is wired to
# on_settings_changed so edits are mirrored onto roop.globals.CFG.
settings_controls = []
15
+
16
def settings_tab():
    """Build the "Settings" tab and wire its change/apply/restart handlers.

    Must be called inside an open gr.Blocks() context.  Components appended to
    the module-level settings_controls list carry an elem_id equal to the CFG
    attribute they control and are synced via on_settings_changed.
    """
    from roop.core import suggest_execution_providers
    global providerlist

    providerlist = suggest_execution_providers()
    with gr.Tab("⚙ Settings"):
        with gr.Row():
            with gr.Column():
                themes = gr.Dropdown(available_themes, label="Theme", info="Change needs complete restart", value=roop.globals.CFG.selected_theme)
            with gr.Column():
                settings_controls.append(gr.Checkbox(label="Public Server", value=roop.globals.CFG.server_share, elem_id='server_share', interactive=True))
                settings_controls.append(gr.Checkbox(label='Clear output folder before each run', value=roop.globals.CFG.clear_output, elem_id='clear_output', interactive=True))
                output_template = gr.Textbox(label="Filename Output Template", info="(file extension is added automatically)", lines=1, placeholder='{file}_{time}', value=roop.globals.CFG.output_template)
            with gr.Column():
                input_server_name = gr.Textbox(label="Server Name", lines=1, info="Leave blank to run locally", value=roop.globals.CFG.server_name)
            with gr.Column():
                input_server_port = gr.Number(label="Server Port", precision=0, info="Leave at 0 to use default", value=roop.globals.CFG.server_port)
        with gr.Row():
            with gr.Column():
                settings_controls.append(gr.Dropdown(providerlist, label="Provider", value=roop.globals.CFG.provider, elem_id='provider', interactive=True))
                # Not in settings_controls: 'default_det_size' targets
                # roop.globals (via on_option_changed), not CFG.
                chk_det_size = gr.Checkbox(label="Use default Det-Size", value=True, elem_id='default_det_size', interactive=True)
                settings_controls.append(gr.Checkbox(label="Force CPU for Face Analyser", value=roop.globals.CFG.force_cpu, elem_id='force_cpu', interactive=True))
                max_threads = gr.Slider(1, 32, value=roop.globals.CFG.max_threads, label="Max. Number of Threads", info='default: 3', step=1.0, interactive=True)
            with gr.Column():
                memory_limit = gr.Slider(0, 128, value=roop.globals.CFG.memory_limit, label="Max. Memory to use (Gb)", info='0 meaning no limit', step=1.0, interactive=True)
                settings_controls.append(gr.Dropdown(image_formats, label="Image Output Format", info='default: png', value=roop.globals.CFG.output_image_format, elem_id='output_image_format', interactive=True))
            with gr.Column():
                settings_controls.append(gr.Dropdown(video_codecs, label="Video Codec", info='default: libx264', value=roop.globals.CFG.output_video_codec, elem_id='output_video_codec', interactive=True))
                settings_controls.append(gr.Dropdown(video_formats, label="Video Output Format", info='default: mp4', value=roop.globals.CFG.output_video_format, elem_id='output_video_format', interactive=True))
                video_quality = gr.Slider(0, 100, value=roop.globals.CFG.video_quality, label="Video Quality (crf)", info='default: 14', step=1.0, interactive=True)
            with gr.Column():
                with gr.Group():
                    settings_controls.append(gr.Checkbox(label='Use OS temp folder', value=roop.globals.CFG.use_os_temp_folder, elem_id='use_os_temp_folder', interactive=True))
                    settings_controls.append(gr.Checkbox(label='Show video in browser (re-encodes output)', value=roop.globals.CFG.output_show_video, elem_id='output_show_video', interactive=True))
                button_apply_restart = gr.Button("Restart Server", variant='primary')
                button_clean_temp = gr.Button("Clean temp folder")
                button_apply_settings = gr.Button("Apply Settings")

    chk_det_size.select(fn=on_option_changed)

    # Settings: every tracked control mirrors its value into CFG on select.
    for s in settings_controls:
        s.select(fn=on_settings_changed)
    # Sliders don't fire select; bind each attribute name via a keyword default
    # so it is captured at definition time (avoids late-binding closures).
    max_threads.input(fn=lambda a,b='max_threads':on_settings_changed_misc(a,b), inputs=[max_threads])
    memory_limit.input(fn=lambda a,b='memory_limit':on_settings_changed_misc(a,b), inputs=[memory_limit])
    video_quality.input(fn=lambda a,b='video_quality':on_settings_changed_misc(a,b), inputs=[video_quality])

    # button_clean_temp.click(fn=clean_temp, outputs=[bt_srcfiles, input_faces, target_faces, bt_destfiles])
    button_clean_temp.click(fn=clean_temp)
    button_apply_settings.click(apply_settings, inputs=[themes, input_server_name, input_server_port, output_template])
    button_apply_restart.click(restart)
67
+
68
+
69
def on_option_changed(evt: gr.SelectData):
    """Mirror a changed option component onto the matching roop.globals attribute.

    The component's elem_id doubles as the attribute name on roop.globals.

    Raises:
        gr.Error: for an unhandled component type or an unknown attribute.
    """
    attribname = evt.target.elem_id
    if isinstance(evt.target, (gr.Checkbox, gr.Dropdown)):
        # Checkboxes report their state via evt.selected, dropdowns via evt.value.
        new_value = evt.selected if isinstance(evt.target, gr.Checkbox) else evt.value
        if hasattr(roop.globals, attribname):
            setattr(roop.globals, attribname, new_value)
            return
    raise gr.Error(f'Unhandled Setting for {evt.target}')
80
+
81
+
82
def on_settings_changed_misc(new_val, attribname):
    """Write a slider value straight onto the CFG attribute named attribname."""
    cfg = roop.globals.CFG
    if not hasattr(cfg, attribname):
        print("Didn't find attrib!")
        return
    setattr(cfg, attribname, new_val)
87
+
88
+
89
+
90
def on_settings_changed(evt: gr.SelectData):
    """Mirror a changed settings component onto roop.globals.CFG.

    The component's elem_id doubles as the CFG attribute name.

    Raises:
        gr.Error: for an unhandled component type or an unknown attribute.
    """
    attribname = evt.target.elem_id
    if isinstance(evt.target, (gr.Checkbox, gr.Dropdown)):
        # Checkboxes report their state via evt.selected, dropdowns via evt.value.
        new_value = evt.selected if isinstance(evt.target, gr.Checkbox) else evt.value
        if hasattr(roop.globals.CFG, attribname):
            setattr(roop.globals.CFG, attribname, new_value)
            return
    raise gr.Error(f'Unhandled Setting for {evt.target}')
102
+
103
def clean_temp():
    """Drop all loaded faces/thumbnails and wipe the temp folder.

    Returns:
        Four Nones, matching the (currently commented-out) four-output click
        wiring in settings_tab.
    """
    from ui.main import prepare_environment

    ui.globals.ui_input_thumbs.clear()
    roop.globals.INPUT_FACESETS.clear()
    roop.globals.TARGET_FACES.clear()
    # Fix: clear() in place instead of rebinding to a new list, so any other
    # module holding a reference to ui.globals.ui_target_thumbs sees the
    # update too (consistent with the clear() calls above).
    ui.globals.ui_target_thumbs.clear()
    if not roop.globals.CFG.use_os_temp_folder:
        clean_dir(os.environ["TEMP"])
    # Recreate output/temp directories that clean_dir may have removed.
    prepare_environment()
    gr.Info('Temp Files removed')
    return None, None, None, None
115
+
116
+
117
def apply_settings(themes, input_server_name, input_server_port, output_template):
    """Persist theme/server/template settings into the configuration file."""
    from ui.main import show_msg

    cfg = roop.globals.CFG
    cfg.selected_theme = themes
    cfg.server_name = input_server_name
    cfg.server_port = input_server_port
    cfg.output_template = output_template
    cfg.save()
    show_msg('Settings saved')
126
+
127
+
128
def restart():
    """Request a server restart.

    Sets a flag that the ui.main run loop polls; the actual restart happens
    there, not here.
    """
    ui.globals.ui_restart_server = True