	Upload 30 files
- roop/__init__.py +0 -0
 - roop/__pycache__/___init__.cpython-312.pyc +0 -0
 - roop/__pycache__/capturer.cpython-312.pyc +0 -0
 - roop/__pycache__/core.cpython-312.pyc +0 -0
 - roop/__pycache__/face_analyser.cpython-312.pyc +0 -0
 - roop/__pycache__/globals.cpython-312.pyc +0 -0
 - roop/__pycache__/metadata.cpython-312.pyc +0 -0
 - roop/__pycache__/predicter.cpython-312.pyc +0 -0
 - roop/__pycache__/typing.cpython-312.pyc +0 -0
 - roop/__pycache__/ui.cpython-312.pyc +0 -0
 - roop/__pycache__/utilities.cpython-312.pyc +0 -0
 - roop/capturer.py +20 -0
 - roop/core.py +217 -0
 - roop/face_analyser.py +34 -0
 - roop/globals.py +17 -0
 - roop/metadata.py +2 -0
 - roop/predicter.py +19 -0
 - roop/processors/__init__.py +0 -0
 - roop/processors/__pycache__/___init__.cpython-312.pyc +0 -0
 - roop/processors/frame/__init__.py +0 -0
 - roop/processors/frame/__pycache__/___init__.cpython-312.pyc +0 -0
 - roop/processors/frame/__pycache__/core.cpython-312.pyc +0 -0
 - roop/processors/frame/__pycache__/face_swapper.cpython-312.pyc +0 -0
 - roop/processors/frame/core.py +95 -0
 - roop/processors/frame/face_enhancer.py +89 -0
 - roop/processors/frame/face_swapper.py +89 -0
 - roop/typing.py +7 -0
 - roop/ui.json +158 -0
 - roop/ui.py +231 -0
 - roop/utilities.py +141 -0
 
    	
roop/__init__.py
ADDED
File without changes

roop/__pycache__/___init__.cpython-312.pyc
ADDED
Binary file (151 Bytes).

roop/__pycache__/capturer.cpython-312.pyc
ADDED
Binary file (1.33 kB).

roop/__pycache__/core.cpython-312.pyc
ADDED
Binary file (16.1 kB).

roop/__pycache__/face_analyser.cpython-312.pyc
ADDED
Binary file (1.84 kB).

roop/__pycache__/globals.cpython-312.pyc
ADDED
Binary file (624 Bytes).

roop/__pycache__/metadata.cpython-312.pyc
ADDED
Binary file (194 Bytes).

roop/__pycache__/predicter.cpython-312.pyc
ADDED
Binary file (1.8 kB).

roop/__pycache__/typing.cpython-312.pyc
ADDED
Binary file (333 Bytes).

roop/__pycache__/ui.cpython-312.pyc
ADDED
Binary file (16.9 kB).

roop/__pycache__/utilities.cpython-312.pyc
ADDED
Binary file (10.3 kB).
    	
roop/capturer.py
ADDED
@@ -0,0 +1,20 @@
from typing import Any
import cv2


def get_video_frame(video_path: str, frame_number: int = 0) -> Any:
    capture = cv2.VideoCapture(video_path)
    frame_total = capture.get(cv2.CAP_PROP_FRAME_COUNT)
    capture.set(cv2.CAP_PROP_POS_FRAMES, min(frame_total, frame_number - 1))
    has_frame, frame = capture.read()
    capture.release()
    if has_frame:
        return frame
    return None


def get_video_frame_total(video_path: str) -> int:
    capture = cv2.VideoCapture(video_path)
    video_frame_total = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
    capture.release()
    return video_frame_total
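
Usage sketch (not part of the commit): reading a preview frame with the helpers above, assuming a hypothetical local clip example.mp4.

import roop.capturer as capturer

total = capturer.get_video_frame_total('example.mp4')        # frame count reported by OpenCV
frame = capturer.get_video_frame('example.mp4', total // 2)  # BGR ndarray, or None if the read fails
if frame is not None:
    print(frame.shape)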
    	
roop/core.py
ADDED
@@ -0,0 +1,217 @@
#!/usr/bin/env python3

import os
import sys
# single thread doubles cuda performance - needs to be set before torch import
if any(arg.startswith('--execution-provider') for arg in sys.argv):
    os.environ['OMP_NUM_THREADS'] = '1'
# reduce tensorflow log level
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import warnings
from typing import List
import platform
import signal
import shutil
import argparse
import torch
import onnxruntime
import tensorflow

import roop.globals
import roop.metadata
import roop.ui as ui
from roop.predicter import predict_image, predict_video
from roop.processors.frame.core import get_frame_processors_modules
from roop.utilities import has_image_extension, is_image, is_video, detect_fps, create_video, extract_frames, get_temp_frame_paths, restore_audio, create_temp, move_temp, clean_temp, normalize_output_path

if 'ROCMExecutionProvider' in roop.globals.execution_providers:
    del torch

warnings.filterwarnings('ignore', category=FutureWarning, module='insightface')
warnings.filterwarnings('ignore', category=UserWarning, module='torchvision')


def parse_args() -> None:
    signal.signal(signal.SIGINT, lambda signal_number, frame: destroy())
    program = argparse.ArgumentParser(formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=100))
    program.add_argument('-s', '--source', help='select an source image', dest='source_path')
    program.add_argument('-t', '--target', help='select an target image or video', dest='target_path')
    program.add_argument('-o', '--output', help='select output file or directory', dest='output_path')
    program.add_argument('--frame-processor', help='frame processors (choices: face_swapper, face_enhancer, ...)', dest='frame_processor', default=['face_swapper'], nargs='+')
    program.add_argument('--keep-fps', help='keep original fps', dest='keep_fps', action='store_true', default=False)
    program.add_argument('--keep-audio', help='keep original audio', dest='keep_audio', action='store_true', default=True)
    program.add_argument('--keep-frames', help='keep temporary frames', dest='keep_frames', action='store_true', default=False)
    program.add_argument('--many-faces', help='process every face', dest='many_faces', action='store_true', default=False)
    program.add_argument('--video-encoder', help='adjust output video encoder', dest='video_encoder', default='libx264', choices=['libx264', 'libx265', 'libvpx-vp9'])
    program.add_argument('--video-quality', help='adjust output video quality', dest='video_quality', type=int, default=18, choices=range(52), metavar='[0-51]')
    program.add_argument('--max-memory', help='maximum amount of RAM in GB', dest='max_memory', type=int, default=suggest_max_memory())
    program.add_argument('--execution-provider', help='available execution provider (choices: cpu, ...)', dest='execution_provider', default=['cpu'], choices=suggest_execution_providers(), nargs='+')
    program.add_argument('--execution-threads', help='number of execution threads', dest='execution_threads', type=int, default=suggest_execution_threads())
    program.add_argument('-v', '--version', action='version', version=f'{roop.metadata.name} {roop.metadata.version}')

    args = program.parse_args()

    roop.globals.source_path = args.source_path
    roop.globals.target_path = args.target_path
    roop.globals.output_path = normalize_output_path(roop.globals.source_path, roop.globals.target_path, args.output_path)
    roop.globals.frame_processors = args.frame_processor
    roop.globals.headless = args.source_path or args.target_path or args.output_path
    roop.globals.keep_fps = args.keep_fps
    roop.globals.keep_audio = args.keep_audio
    roop.globals.keep_frames = args.keep_frames
    roop.globals.many_faces = args.many_faces
    roop.globals.video_encoder = args.video_encoder
    roop.globals.video_quality = args.video_quality
    roop.globals.max_memory = args.max_memory
    roop.globals.execution_providers = decode_execution_providers(args.execution_provider)
    roop.globals.execution_threads = args.execution_threads


def encode_execution_providers(execution_providers: List[str]) -> List[str]:
    return [execution_provider.replace('ExecutionProvider', '').lower() for execution_provider in execution_providers]


def decode_execution_providers(execution_providers: List[str]) -> List[str]:
    return [provider for provider, encoded_execution_provider in zip(onnxruntime.get_available_providers(), encode_execution_providers(onnxruntime.get_available_providers()))
            if any(execution_provider in encoded_execution_provider for execution_provider in execution_providers)]


def suggest_max_memory() -> int:
    if platform.system().lower() == 'darwin':
        return 4
    return 16


def suggest_execution_providers() -> List[str]:
    return encode_execution_providers(onnxruntime.get_available_providers())


def suggest_execution_threads() -> int:
    if 'DmlExecutionProvider' in roop.globals.execution_providers:
        return 1
    if 'ROCMExecutionProvider' in roop.globals.execution_providers:
        return 1
    return 8


def limit_resources() -> None:
    # prevent tensorflow memory leak
    gpus = tensorflow.config.experimental.list_physical_devices('GPU')
    for gpu in gpus:
        tensorflow.config.experimental.set_virtual_device_configuration(gpu, [
            tensorflow.config.experimental.VirtualDeviceConfiguration(memory_limit=1024)
        ])
    # limit memory usage
    if roop.globals.max_memory:
        memory = roop.globals.max_memory * 1024 ** 3
        if platform.system().lower() == 'darwin':
            memory = roop.globals.max_memory * 1024 ** 6
        if platform.system().lower() == 'windows':
            import ctypes
            kernel32 = ctypes.windll.kernel32
            kernel32.SetProcessWorkingSetSize(-1, ctypes.c_size_t(memory), ctypes.c_size_t(memory))
        else:
            import resource
            resource.setrlimit(resource.RLIMIT_DATA, (memory, memory))


def release_resources() -> None:
    if 'CUDAExecutionProvider' in roop.globals.execution_providers:
        torch.cuda.empty_cache()


def pre_check() -> bool:
    if sys.version_info < (3, 9):
        update_status('Python version is not supported - please upgrade to 3.9 or higher.')
        return False
    if not shutil.which('ffmpeg'):
        update_status('ffmpeg is not installed.')
        return False
    return True


def update_status(message: str, scope: str = 'ROOP.CORE') -> None:
    print(f'[{scope}] {message}')
    if not roop.globals.headless:
        ui.update_status(message)


def start() -> None:
    for frame_processor in get_frame_processors_modules(roop.globals.frame_processors):
        if not frame_processor.pre_start():
            return
    # process image to image
    if has_image_extension(roop.globals.target_path):
        if predict_image(roop.globals.target_path):
            destroy()
        shutil.copy2(roop.globals.target_path, roop.globals.output_path)
        for frame_processor in get_frame_processors_modules(roop.globals.frame_processors):
            for frame_processor_name in roop.globals.frame_processors:
                if frame_processor_name == frame_processor.frame_name:
                    update_status('Progressing...', frame_processor.NAME)
                    frame_processor.process_image(roop.globals.source_path, roop.globals.output_path, roop.globals.output_path)
                    frame_processor.post_process()
                    release_resources()
        if is_image(roop.globals.target_path):
            update_status('Processing to image succeed!')
        else:
            update_status('Processing to image failed!')
        return
    # process image to videos
    if predict_video(roop.globals.target_path):
        destroy()
    update_status('Creating temp resources...')
    create_temp(roop.globals.target_path)
    update_status('Extracting frames...')
    extract_frames(roop.globals.target_path)
    temp_frame_paths = get_temp_frame_paths(roop.globals.target_path)
    for frame_processor in get_frame_processors_modules(roop.globals.frame_processors):
        update_status('Progressing...', frame_processor.NAME)
        frame_processor.process_video(roop.globals.source_path, temp_frame_paths)
        frame_processor.post_process()
        release_resources()
    # handles fps
    if roop.globals.keep_fps:
        update_status('Detecting fps...')
        fps = detect_fps(roop.globals.target_path)
        update_status(f'Creating video with {fps} fps...')
        create_video(roop.globals.target_path, fps)
    else:
        update_status('Creating video with 30.0 fps...')
        create_video(roop.globals.target_path)
    # handle audio
    if roop.globals.keep_audio:
        if roop.globals.keep_fps:
            update_status('Restoring audio...')
        else:
            update_status('Restoring audio might cause issues as fps are not kept...')
        restore_audio(roop.globals.target_path, roop.globals.output_path)
    else:
        move_temp(roop.globals.target_path, roop.globals.output_path)
    # clean and validate
    clean_temp(roop.globals.target_path)
    if is_video(roop.globals.target_path):
        update_status('Processing to video succeed!')
    else:
        update_status('Processing to video failed!')


def destroy() -> None:
    if roop.globals.target_path:
        clean_temp(roop.globals.target_path)
    quit()


def run() -> None:
    parse_args()
    if not pre_check():
        return
    for frame_processor in get_frame_processors_modules(roop.globals.frame_processors):
        if not frame_processor.pre_check():
            return
    limit_resources()
    if roop.globals.headless:
        start()
    else:
        window = ui.init(start, destroy)
        window.mainloop()
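
Usage sketch (not part of the commit): run() is the entry point this module exposes; the upstream project wraps it in a small run.py. A minimal, hypothetical launcher plus an example headless invocation (file paths are placeholders).

# hypothetical run.py at the repository root
from roop import core

if __name__ == '__main__':
    # headless example:
    #   python run.py -s face.jpg -t clip.mp4 -o swapped.mp4 \
    #       --frame-processor face_swapper face_enhancer --keep-fps --execution-provider cpu
    # with no -s/-t/-o arguments, roop.globals.headless stays falsy and the tkinter UI starts instead
    core.run()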
    	
roop/face_analyser.py
ADDED
@@ -0,0 +1,34 @@
import threading
from typing import Any
import insightface

import roop.globals
from roop.typing import Frame

FACE_ANALYSER = None
THREAD_LOCK = threading.Lock()


def get_face_analyser() -> Any:
    global FACE_ANALYSER

    with THREAD_LOCK:
        if FACE_ANALYSER is None:
            FACE_ANALYSER = insightface.app.FaceAnalysis(name='buffalo_l', providers=roop.globals.execution_providers)
            FACE_ANALYSER.prepare(ctx_id=0, det_size=(640, 640))
    return FACE_ANALYSER


def get_one_face(frame: Frame) -> Any:
    face = get_face_analyser().get(frame)
    try:
        return min(face, key=lambda x: x.bbox[0])
    except ValueError:
        return None


def get_many_faces(frame: Frame) -> Any:
    try:
        return get_face_analyser().get(frame)
    except IndexError:
        return None
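
Usage sketch (not part of the commit): detecting faces in a still image, assuming a hypothetical photo.jpg on disk.

import cv2
from roop.face_analyser import get_one_face, get_many_faces

frame = cv2.imread('photo.jpg')   # BGR ndarray, matching roop's Frame alias
face = get_one_face(frame)        # left-most detection (smallest bbox x coordinate), or None
if face is not None:
    print(face.bbox)              # insightface detection with bounding box, landmarks and embedding
faces = get_many_faces(frame)     # every detection, used when --many-faces is set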
    	
roop/globals.py
ADDED
@@ -0,0 +1,17 @@
from typing import List

source_path = None
target_path = None
output_path = None
frame_processors: List[str] = []
keep_fps = None
keep_audio = None
keep_frames = None
many_faces = None
video_encoder = None
video_quality = None
max_memory = None
execution_providers: List[str] = []
execution_threads = None
headless = None
log_level = 'error'
    	
roop/metadata.py
ADDED
@@ -0,0 +1,2 @@
name = 'roop'
version = '1.1.0'
    	
roop/predicter.py
ADDED
@@ -0,0 +1,19 @@
import numpy
import opennsfw2
from PIL import Image

from roop.typing import Frame

MAX_PROBABILITY = 0.85


def predict_frame(target_frame: Frame) -> bool:
    return False


def predict_image(target_path: str) -> bool:
    return False


def predict_video(target_path: str) -> bool:
    return False
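
Note (not part of the commit): every check above is stubbed to return False, which is why numpy, opennsfw2 and PIL are imported but unused, and no target is ever skipped. For reference, a hedged sketch of what path-based checks could look like, assuming opennsfw2 exposes predict_image and predict_video_frames as in its documented API:

def predict_image(target_path: str) -> bool:
    # probability that a single image is NSFW, compared against the module threshold
    return opennsfw2.predict_image(target_path) > MAX_PROBABILITY


def predict_video(target_path: str) -> bool:
    # sample every 100th frame and flag the video if any sample crosses the threshold
    _, probabilities = opennsfw2.predict_video_frames(video_path=target_path, frame_interval=100)
    return any(probability > MAX_PROBABILITY for probability in probabilities)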
    	
roop/processors/__init__.py
ADDED
File without changes

roop/processors/__pycache__/___init__.cpython-312.pyc
ADDED
Binary file (162 Bytes).

roop/processors/frame/__init__.py
ADDED
File without changes

roop/processors/frame/__pycache__/___init__.cpython-312.pyc
ADDED
Binary file (168 Bytes).

roop/processors/frame/__pycache__/core.cpython-312.pyc
ADDED
Binary file (5.43 kB).

roop/processors/frame/__pycache__/face_swapper.cpython-312.pyc
ADDED
Binary file (4.92 kB).
    	
roop/processors/frame/core.py
ADDED
@@ -0,0 +1,95 @@
import os
import importlib
import psutil
from concurrent.futures import ThreadPoolExecutor, as_completed
from queue import Queue
from types import ModuleType
from typing import Any, List, Callable
from tqdm import tqdm

import roop

FRAME_PROCESSORS_MODULES: List[ModuleType] = []
FRAME_PROCESSORS_INTERFACE = [
    'pre_check',
    'pre_start',
    'process_frame',
    'process_frames',
    'process_image',
    'process_video',
    'post_process'
]


def load_frame_processor_module(frame_processor: str) -> Any:
    try:
        frame_processor_module = importlib.import_module(f'roop.processors.frame.{frame_processor}')
        for method_name in FRAME_PROCESSORS_INTERFACE:
            if not hasattr(frame_processor_module, method_name):
                raise NotImplementedError
    except (ImportError, NotImplementedError):
        quit(f'Frame processor {frame_processor} crashed.')
    return frame_processor_module


def get_frame_processors_modules(frame_processors: List[str]) -> List[ModuleType]:
    global FRAME_PROCESSORS_MODULES

    for frame_processor in frame_processors:
        found = False
        for frame_processor_module in FRAME_PROCESSORS_MODULES:
            if frame_processor_module.frame_name == frame_processor:
                found = True
                break
        if not found:
            frame_processor_module = load_frame_processor_module(frame_processor)
            FRAME_PROCESSORS_MODULES.append(frame_processor_module)
    # if not FRAME_PROCESSORS_MODULES:

    return FRAME_PROCESSORS_MODULES


def multi_process_frame(source_path: str, temp_frame_paths: List[str], process_frames: Callable[[str, List[str], Any], None], update: Callable[[], None]) -> None:
    with ThreadPoolExecutor(max_workers=roop.globals.execution_threads) as executor:
        futures = []
        queue = create_queue(temp_frame_paths)
        queue_per_future = len(temp_frame_paths) // roop.globals.execution_threads
        while not queue.empty():
            future = executor.submit(process_frames, source_path, pick_queue(queue, queue_per_future), update)
            futures.append(future)
        for future in as_completed(futures):
            future.result()


def create_queue(temp_frame_paths: List[str]) -> Queue[str]:
    queue: Queue[str] = Queue()
    for frame_path in temp_frame_paths:
        queue.put(frame_path)
    return queue


def pick_queue(queue: Queue[str], queue_per_future: int) -> List[str]:
    queues = []
    for _ in range(queue_per_future):
        if not queue.empty():
            queues.append(queue.get())
    return queues


def process_video(source_path: str, frame_paths: list[str], process_frames: Callable[[str, List[str], Any], None]) -> None:
    progress_bar_format = '{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'
    total = len(frame_paths)
    with tqdm(total=total, desc='Processing', unit='frame', dynamic_ncols=True, bar_format=progress_bar_format) as progress:
        multi_process_frame(source_path, frame_paths, process_frames, lambda: update_progress(progress))


def update_progress(progress: Any = None) -> None:
    process = psutil.Process(os.getpid())
    memory_usage = process.memory_info().rss / 1024 / 1024 / 1024
    progress.set_postfix({
        'memory_usage': '{:.2f}'.format(memory_usage).zfill(5) + 'GB',
        'execution_providers': roop.globals.execution_providers,
        'execution_threads': roop.globals.execution_threads
    })
    progress.refresh()
    progress.update(1)
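
Illustration (not part of the commit): get_frame_processors_modules will load any module under roop.processors.frame that defines a frame_name attribute and the seven names in FRAME_PROCESSORS_INTERFACE. A minimal, hypothetical no-op processor sketch:

# hypothetical roop/processors/frame/passthrough.py, selectable via --frame-processor passthrough
from typing import Any, Callable, List

import cv2

import roop.processors.frame.core as core

NAME = 'ROOP.PASSTHROUGH'
frame_name = 'passthrough'


def pre_check() -> bool:
    return True


def pre_start() -> bool:
    return True


def post_process() -> None:
    pass


def process_frame(source_face: Any, temp_frame: Any) -> Any:
    return temp_frame  # no-op: hand the frame back unchanged


def process_frames(source_path: str, temp_frame_paths: List[str], update: Callable[[], None]) -> None:
    for temp_frame_path in temp_frame_paths:
        cv2.imwrite(temp_frame_path, process_frame(None, cv2.imread(temp_frame_path)))
        if update:
            update()


def process_image(source_path: str, target_path: str, output_path: str) -> None:
    cv2.imwrite(output_path, process_frame(None, cv2.imread(target_path)))


def process_video(source_path: str, temp_frame_paths: List[str]) -> None:
    core.process_video(source_path, temp_frame_paths, process_frames)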
    	
        roop/processors/frame/face_enhancer.py
    ADDED
    
    | 
         @@ -0,0 +1,89 @@ 
     | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
| 
         | 
|
| 1 | 
         
            +
            from typing import Any, List, Callable
         
     | 
| 2 | 
         
            +
            import cv2
         
     | 
| 3 | 
         
            +
            import threading
         
     | 
| 4 | 
         
import gfpgan

import roop.globals
import roop.processors.frame.core
from roop.core import update_status
from roop.face_analyser import get_one_face
from roop.typing import Frame, Face
from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video
import torch

FACE_ENHANCER = None
THREAD_SEMAPHORE = threading.Semaphore()
THREAD_LOCK = threading.Lock()
NAME = 'ROOP.FACE-ENHANCER'
frame_name = 'face_enhancer'

if torch.cuda.is_available():
    device='cuda'
else:
    device='cpu'


def get_face_enhancer() -> Any:
    global FACE_ENHANCER

    with THREAD_LOCK:
        if FACE_ENHANCER is None:
            model_path = resolve_relative_path('../models/GFPGANv1.4.pth')
            # todo: set models path https://github.com/TencentARC/GFPGAN/issues/399
            FACE_ENHANCER = gfpgan.GFPGANer(model_path=model_path, upscale=1,device=device) # type: ignore[attr-defined]
    return FACE_ENHANCER


def pre_check() -> bool:
    download_directory_path = resolve_relative_path('../models')
    # conditional_download(download_directory_path, ['https://huggingface.co/henryruhs/roop/resolve/main/GFPGANv1.4.pth'])
    conditional_download(download_directory_path, ['https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth'])
    return True


def pre_start() -> bool:
    if not is_image(roop.globals.target_path) and not is_video(roop.globals.target_path):
        update_status('Select an image or video for target path.', NAME)
        return False
    return True


def post_process() -> None:
    global FACE_ENHANCER

    FACE_ENHANCER = None


def enhance_face(temp_frame: Frame) -> Frame:
    with THREAD_SEMAPHORE:
        _, _, temp_frame = get_face_enhancer().enhance(
            temp_frame,
            paste_back=True
        )
    return temp_frame


def process_frame(source_face: Face, temp_frame: Frame) -> Frame:
    target_face = get_one_face(temp_frame)
    if target_face:
        temp_frame = enhance_face(temp_frame)
    return temp_frame


def process_frames(source_path: str, temp_frame_paths: List[str], update: Callable[[], None]) -> None:
    for temp_frame_path in temp_frame_paths:
        temp_frame = cv2.imread(temp_frame_path)
        result = process_frame(None, temp_frame)
        cv2.imwrite(temp_frame_path, result)
        if update:
            update()


def process_image(source_path: str, target_path: str, output_path: str) -> None:
    target_frame = cv2.imread(target_path)
    result = process_frame(None, target_frame)
    cv2.imwrite(output_path, result)


def process_video(source_path: str, temp_frame_paths: List[str]) -> None:
    roop.processors.frame.core.process_video(None, temp_frame_paths, process_frames)
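For orientation, a minimal sketch of how this enhancer could be exercised on a single image, assuming the GFPGAN weights end up under ../models via pre_check(); the file names used here are placeholders, not part of the upload:

import cv2
import roop.globals
from roop.processors.frame import face_enhancer

# Hypothetical smoke test; 'target.jpg' and 'enhanced.jpg' are placeholder paths.
roop.globals.target_path = 'target.jpg'
face_enhancer.pre_check()                  # downloads GFPGANv1.4.pth if it is missing
if face_enhancer.pre_start():
    frame = cv2.imread(roop.globals.target_path)
    enhanced = face_enhancer.process_frame(None, frame)   # the enhancer ignores the source face
    cv2.imwrite('enhanced.jpg', enhanced)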
    	
        roop/processors/frame/face_swapper.py
    ADDED
    
@@ -0,0 +1,89 @@
from typing import Any, List, Callable
import cv2
import insightface
import threading

import roop.globals
import roop.processors.frame.core
from roop.core import update_status
from roop.face_analyser import get_one_face, get_many_faces
from roop.typing import Face, Frame
from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video

FACE_SWAPPER = None
THREAD_LOCK = threading.Lock()
NAME = 'ROOP.FACE-SWAPPER'
frame_name = 'face_swapper'

def get_face_swapper() -> Any:
    global FACE_SWAPPER

    with THREAD_LOCK:
        if FACE_SWAPPER is None:
            model_path = resolve_relative_path('../models/inswapper_128.onnx')
            FACE_SWAPPER = insightface.model_zoo.get_model(model_path, providers=roop.globals.execution_providers)
    return FACE_SWAPPER


def pre_check() -> bool:
    download_directory_path = resolve_relative_path('../models')
    # conditional_download(download_directory_path, ['https://huggingface.co/henryruhs/roop/resolve/main/inswapper_128.onnx'])
    conditional_download(download_directory_path, ['https://huggingface.co/thebiglaskowski/inswapper_128.onnx/resolve/main/inswapper_128.onnx'])
    return True


def pre_start() -> bool:
    if not is_image(roop.globals.source_path):
        update_status('Select an image for source path.', NAME)
        return False
    elif not get_one_face(cv2.imread(roop.globals.source_path)):
        update_status('No face in source path detected.', NAME)
        return False
    if not is_image(roop.globals.target_path) and not is_video(roop.globals.target_path):
        update_status('Select an image or video for target path.', NAME)
        return False
    return True


def post_process() -> None:
    global FACE_SWAPPER

    FACE_SWAPPER = None


def swap_face(source_face: Face, target_face: Face, temp_frame: Frame) -> Frame:
    return get_face_swapper().get(temp_frame, target_face, source_face, paste_back=True)


def process_frame(source_face: Face, temp_frame: Frame) -> Frame:
    if roop.globals.many_faces:
        many_faces = get_many_faces(temp_frame)
        if many_faces:
            for target_face in many_faces:
                temp_frame = swap_face(source_face, target_face, temp_frame)
    else:
        target_face = get_one_face(temp_frame)
        if target_face:
            temp_frame = swap_face(source_face, target_face, temp_frame)
    return temp_frame


def process_frames(source_path: str, temp_frame_paths: List[str], update: Callable[[], None]) -> None:
    source_face = get_one_face(cv2.imread(source_path))
    for temp_frame_path in temp_frame_paths:
        temp_frame = cv2.imread(temp_frame_path)
        result = process_frame(source_face, temp_frame)
        cv2.imwrite(temp_frame_path, result)
        if update:
            update()


def process_image(source_path: str, target_path: str, output_path: str) -> None:
    source_face = get_one_face(cv2.imread(source_path))
    target_frame = cv2.imread(target_path)
    result = process_frame(source_face, target_frame)
    cv2.imwrite(output_path, result)


def process_video(source_path: str, temp_frame_paths: List[str]) -> None:
    roop.processors.frame.core.process_video(source_path, temp_frame_paths, process_frames)
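For reference, a minimal single-image swap using the functions above, assuming inswapper_128.onnx is available under ../models (pre_check() fetches it otherwise); the image paths are placeholders, not part of the upload:

import roop.globals
from roop.processors.frame import face_swapper

# Hypothetical example; 'face.jpg', 'scene.jpg' and 'output.jpg' are placeholder paths.
roop.globals.source_path = 'face.jpg'
roop.globals.target_path = 'scene.jpg'
face_swapper.pre_check()
if face_swapper.pre_start():
    face_swapper.process_image(roop.globals.source_path, roop.globals.target_path, 'output.jpg')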
    	
        roop/typing.py
    ADDED
    
@@ -0,0 +1,7 @@
from typing import Any

from insightface.app.common import Face
import numpy

Face = Face
Frame = numpy.ndarray[Any, Any]
    	
        roop/ui.json
    ADDED
    
@@ -0,0 +1,158 @@
{
  "CTk": {
    "fg_color": ["gray95", "gray10"]
  },
  "CTkToplevel": {
    "fg_color": ["gray95", "gray10"]
  },
  "CTkFrame": {
    "corner_radius": 6,
    "border_width": 0,
    "fg_color": ["gray90", "gray13"],
    "top_fg_color": ["gray85", "gray16"],
    "border_color": ["gray65", "gray28"]
  },
  "CTkButton": {
    "corner_radius": 6,
    "border_width": 0,
    "fg_color": ["#3a7ebf", "#1f538d"],
    "hover_color": ["#325882", "#14375e"],
    "border_color": ["#3E454A", "#949A9F"],
    "text_color": ["#DCE4EE", "#DCE4EE"],
    "text_color_disabled": ["gray74", "gray60"]
  },
  "CTkLabel": {
    "corner_radius": 0,
    "fg_color": "transparent",
    "text_color": ["gray14", "gray84"]
  },
  "CTkEntry": {
    "corner_radius": 6,
    "border_width": 2,
    "fg_color": ["#F9F9FA", "#343638"],
    "border_color": ["#979DA2", "#565B5E"],
    "text_color": ["gray14", "gray84"],
    "placeholder_text_color": ["gray52", "gray62"]
  },
  "CTkCheckbox": {
    "corner_radius": 6,
    "border_width": 3,
    "fg_color": ["#3a7ebf", "#1f538d"],
    "border_color": ["#3E454A", "#949A9F"],
    "hover_color": ["#325882", "#14375e"],
    "checkmark_color": ["#DCE4EE", "gray90"],
    "text_color": ["gray14", "gray84"],
    "text_color_disabled": ["gray60", "gray45"]
  },
  "CTkSwitch": {
    "corner_radius": 1000,
    "border_width": 3,
    "button_length": 0,
    "fg_color": ["#939BA2", "#4A4D50"],
    "progress_color": ["#3a7ebf", "#1f538d"],
    "button_color": ["gray36", "#D5D9DE"],
    "button_hover_color": ["gray20", "gray100"],
    "text_color": ["gray14", "gray84"],
    "text_color_disabled": ["gray60", "gray45"]
  },
  "CTkRadiobutton": {
    "corner_radius": 1000,
    "border_width_checked": 6,
    "border_width_unchecked": 3,
    "fg_color": ["#3a7ebf", "#1f538d"],
    "border_color": ["#3E454A", "#949A9F"],
    "hover_color": ["#325882", "#14375e"],
    "text_color": ["gray14", "gray84"],
    "text_color_disabled": ["gray60", "gray45"]
  },
  "CTkProgressBar": {
    "corner_radius": 1000,
    "border_width": 0,
    "fg_color": ["#939BA2", "#4A4D50"],
    "progress_color": ["#3a7ebf", "#1f538d"],
    "border_color": ["gray", "gray"]
  },
  "CTkSlider": {
    "corner_radius": 1000,
    "button_corner_radius": 1000,
    "border_width": 6,
    "button_length": 0,
    "fg_color": ["#939BA2", "#4A4D50"],
    "progress_color": ["gray40", "#AAB0B5"],
    "button_color": ["#3a7ebf", "#1f538d"],
    "button_hover_color": ["#325882", "#14375e"]
  },
  "CTkOptionMenu": {
    "corner_radius": 6,
    "fg_color": ["#3a7ebf", "#1f538d"],
    "button_color": ["#325882", "#14375e"],
    "button_hover_color": ["#234567", "#1e2c40"],
    "text_color": ["#DCE4EE", "#DCE4EE"],
    "text_color_disabled": ["gray74", "gray60"]
  },
  "CTkComboBox": {
    "corner_radius": 6,
    "border_width": 2,
    "fg_color": ["#F9F9FA", "#343638"],
    "border_color": ["#979DA2", "#565B5E"],
    "button_color": ["#979DA2", "#565B5E"],
    "button_hover_color": ["#6E7174", "#7A848D"],
    "text_color": ["gray14", "gray84"],
    "text_color_disabled": ["gray50", "gray45"]
  },
  "CTkScrollbar": {
    "corner_radius": 1000,
    "border_spacing": 4,
    "fg_color": "transparent",
    "button_color": ["gray55", "gray41"],
    "button_hover_color": ["gray40", "gray53"]
  },
  "CTkSegmentedButton": {
    "corner_radius": 6,
    "border_width": 2,
    "fg_color": ["#979DA2", "gray29"],
    "selected_color": ["#3a7ebf", "#1f538d"],
    "selected_hover_color": ["#325882", "#14375e"],
    "unselected_color": ["#979DA2", "gray29"],
    "unselected_hover_color": ["gray70", "gray41"],
    "text_color": ["#DCE4EE", "#DCE4EE"],
    "text_color_disabled": ["gray74", "gray60"]
  },
  "CTkTextbox": {
    "corner_radius": 6,
    "border_width": 0,
    "fg_color": ["gray100", "gray20"],
    "border_color": ["#979DA2", "#565B5E"],
    "text_color": ["gray14", "gray84"],
    "scrollbar_button_color": ["gray55", "gray41"],
    "scrollbar_button_hover_color": ["gray40", "gray53"]
  },
  "CTkScrollableFrame": {
    "label_fg_color": ["gray80", "gray21"]
  },
  "DropdownMenu": {
    "fg_color": ["gray90", "gray20"],
    "hover_color": ["gray75", "gray28"],
    "text_color": ["gray14", "gray84"]
  },
  "CTkFont": {
    "macOS": {
      "family": "Avenir",
      "size": 12,
      "weight": "normal"
    },
    "Windows": {
      "family": "Corbel",
      "size": 12,
      "weight": "normal"
    },
    "Linux": {
      "family": "Montserrat",
      "size": 12,
      "weight": "normal"
    }
  },
  "RoopDonate": {
    "text_color": ["#3a7ebf", "gray60"]
  }
}
    	
        roop/ui.py
    ADDED
    
    | 
         @@ -0,0 +1,231 @@ 
     | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
| 
         | 
|
| 1 | 
         
            +
            import os
         
     | 
| 2 | 
         
            +
            import webbrowser
         
     | 
| 3 | 
         
            +
            import customtkinter as ctk
         
     | 
| 4 | 
         
            +
            from typing import Callable, Tuple
         
     | 
| 5 | 
         
            +
            import cv2
         
     | 
| 6 | 
         
            +
            from PIL import Image, ImageOps
         
     | 
| 7 | 
         
            +
             
     | 
| 8 | 
         
            +
            import roop.globals
         
     | 
| 9 | 
         
            +
            import roop.metadata
         
     | 
| 10 | 
         
            +
            from roop.face_analyser import get_one_face
         
     | 
| 11 | 
         
            +
            from roop.capturer import get_video_frame, get_video_frame_total
         
     | 
| 12 | 
         
            +
            from roop.predicter import predict_frame
         
     | 
| 13 | 
         
            +
            from roop.processors.frame.core import get_frame_processors_modules
         
     | 
| 14 | 
         
            +
            from roop.utilities import is_image, is_video, resolve_relative_path
         
     | 
| 15 | 
         
            +
             
     | 
| 16 | 
         
            +
            ROOT = None
         
     | 
| 17 | 
         
            +
            ROOT_HEIGHT = 700
         
     | 
| 18 | 
         
            +
            ROOT_WIDTH = 600
         
     | 
| 19 | 
         
            +
             
     | 
| 20 | 
         
            +
            PREVIEW = None
         
     | 
| 21 | 
         
            +
            PREVIEW_MAX_HEIGHT = 700
         
     | 
| 22 | 
         
            +
            PREVIEW_MAX_WIDTH = 1200
         
     | 
| 23 | 
         
            +
             
     | 
| 24 | 
         
            +
            RECENT_DIRECTORY_SOURCE = None
         
     | 
| 25 | 
         
            +
            RECENT_DIRECTORY_TARGET = None
         
     | 
| 26 | 
         
            +
            RECENT_DIRECTORY_OUTPUT = None
         
     | 
| 27 | 
         
            +
             
     | 
| 28 | 
         
            +
            preview_label = None
         
     | 
| 29 | 
         
            +
            preview_slider = None
         
     | 
| 30 | 
         
            +
            source_label = None
         
     | 
| 31 | 
         
            +
            target_label = None
         
     | 
| 32 | 
         
            +
            status_label = None
         
     | 
| 33 | 
         
            +
             
     | 
| 34 | 
         
            +
             
     | 
| 35 | 
         
            +
            def init(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.CTk:
         
     | 
| 36 | 
         
            +
                global ROOT, PREVIEW
         
     | 
| 37 | 
         
            +
             
     | 
| 38 | 
         
            +
                ROOT = create_root(start, destroy)
         
     | 
| 39 | 
         
            +
                PREVIEW = create_preview(ROOT)
         
     | 
| 40 | 
         
            +
             
     | 
| 41 | 
         
            +
                return ROOT
         
     | 
| 42 | 
         
            +
             
     | 
| 43 | 
         
            +
             
     | 
| 44 | 
         
            +
            def create_root(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.CTk:
         
     | 
| 45 | 
         
            +
                global source_label, target_label, status_label
         
     | 
| 46 | 
         
            +
             
     | 
| 47 | 
         
            +
                ctk.deactivate_automatic_dpi_awareness()
         
     | 
| 48 | 
         
            +
                ctk.set_appearance_mode('system')
         
     | 
| 49 | 
         
            +
                ctk.set_default_color_theme(resolve_relative_path('ui.json'))
         
     | 
| 50 | 
         
            +
             
     | 
| 51 | 
         
            +
                root = ctk.CTk()
         
     | 
| 52 | 
         
            +
                root.minsize(ROOT_WIDTH, ROOT_HEIGHT)
         
     | 
| 53 | 
         
            +
                root.title(f'{roop.metadata.name} {roop.metadata.version}')
         
     | 
| 54 | 
         
            +
                root.configure()
         
     | 
| 55 | 
         
            +
                root.protocol('WM_DELETE_WINDOW', lambda: destroy())
         
     | 
| 56 | 
         
            +
             
     | 
| 57 | 
         
            +
                source_label = ctk.CTkLabel(root, text=None)
         
     | 
| 58 | 
         
            +
                source_label.place(relx=0.1, rely=0.1, relwidth=0.3, relheight=0.25)
         
     | 
| 59 | 
         
            +
             
     | 
| 60 | 
         
            +
                target_label = ctk.CTkLabel(root, text=None)
         
     | 
| 61 | 
         
            +
                target_label.place(relx=0.6, rely=0.1, relwidth=0.3, relheight=0.25)
         
     | 
| 62 | 
         
            +
             
     | 
| 63 | 
         
            +
                source_button = ctk.CTkButton(root, text='Select a face', cursor='hand2', command=lambda: select_source_path())
         
     | 
| 64 | 
         
            +
                source_button.place(relx=0.1, rely=0.4, relwidth=0.3, relheight=0.1)
         
     | 
| 65 | 
         
            +
             
     | 
| 66 | 
         
            +
                target_button = ctk.CTkButton(root, text='Select a target', cursor='hand2', command=lambda: select_target_path())
         
     | 
| 67 | 
         
            +
                target_button.place(relx=0.6, rely=0.4, relwidth=0.3, relheight=0.1)
         
     | 
| 68 | 
         
            +
             
     | 
| 69 | 
         
            +
                keep_fps_value = ctk.BooleanVar(value=roop.globals.keep_fps)
         
     | 
| 70 | 
         
            +
                keep_fps_checkbox = ctk.CTkSwitch(root, text='Keep fps', variable=keep_fps_value, cursor='hand2', command=lambda: setattr(roop.globals, 'keep_fps', not roop.globals.keep_fps))
         
     | 
| 71 | 
         
            +
                keep_fps_checkbox.place(relx=0.1, rely=0.6)
         
     | 
| 72 | 
         
            +
             
     | 
| 73 | 
         
            +
                keep_frames_value = ctk.BooleanVar(value=roop.globals.keep_frames)
         
     | 
| 74 | 
         
            +
                keep_frames_switch = ctk.CTkSwitch(root, text='Keep frames', variable=keep_frames_value, cursor='hand2', command=lambda: setattr(roop.globals, 'keep_frames', keep_frames_value.get()))
         
     | 
| 75 | 
         
            +
                keep_frames_switch.place(relx=0.1, rely=0.65)
         
     | 
| 76 | 
         
            +
             
     | 
| 77 | 
         
            +
                keep_audio_value = ctk.BooleanVar(value=roop.globals.keep_audio)
         
     | 
| 78 | 
         
            +
                keep_audio_switch = ctk.CTkSwitch(root, text='Keep audio', variable=keep_audio_value, cursor='hand2', command=lambda: setattr(roop.globals, 'keep_audio', keep_audio_value.get()))
         
     | 
| 79 | 
         
            +
                keep_audio_switch.place(relx=0.6, rely=0.6)
         
     | 
| 80 | 
         
            +
             
     | 
| 81 | 
         
            +
                many_faces_value = ctk.BooleanVar(value=roop.globals.many_faces)
         
     | 
| 82 | 
         
            +
                many_faces_switch = ctk.CTkSwitch(root, text='Many faces', variable=many_faces_value, cursor='hand2', command=lambda: setattr(roop.globals, 'many_faces', many_faces_value.get()))
         
     | 
| 83 | 
         
            +
                many_faces_switch.place(relx=0.6, rely=0.65)
         
     | 
| 84 | 
         
            +
             
     | 
| 85 | 
         
            +
                start_button = ctk.CTkButton(root, text='Start', cursor='hand2', command=lambda: select_output_path(start))
         
     | 
| 86 | 
         
            +
                start_button.place(relx=0.15, rely=0.75, relwidth=0.2, relheight=0.05)
         
     | 
| 87 | 
         
            +
             
     | 
| 88 | 
         
            +
                stop_button = ctk.CTkButton(root, text='Destroy', cursor='hand2', command=lambda: destroy())
         
     | 
| 89 | 
         
            +
                stop_button.place(relx=0.4, rely=0.75, relwidth=0.2, relheight=0.05)
         
     | 
| 90 | 
         
            +
             
     | 
| 91 | 
         
            +
                preview_button = ctk.CTkButton(root, text='Preview', cursor='hand2', command=lambda: toggle_preview())
         
     | 
| 92 | 
         
            +
                preview_button.place(relx=0.65, rely=0.75, relwidth=0.2, relheight=0.05)
         
     | 
| 93 | 
         
            +
             
     | 
| 94 | 
         
            +
                status_label = ctk.CTkLabel(root, text=None, justify='center')
         
     | 
| 95 | 
         
            +
                status_label.place(relx=0.1, rely=0.9, relwidth=0.8)
         
     | 
| 96 | 
         
            +
             
     | 
| 97 | 
         
            +
                donate_label = ctk.CTkLabel(root, text='^_^ Donate to project ^_^', justify='center', cursor='hand2')
         
     | 
| 98 | 
         
            +
                donate_label.place(relx=0.1, rely=0.95, relwidth=0.8)
         
     | 
| 99 | 
         
            +
                donate_label.configure(text_color=ctk.ThemeManager.theme.get('RoopDonate').get('text_color'))
         
     | 
| 100 | 
         
            +
                donate_label.bind('<Button>', lambda event: webbrowser.open('https://github.com/sponsors/s0md3v'))
         
     | 
| 101 | 
         
            +
             
     | 
| 102 | 
         
            +
                return root
         
     | 
| 103 | 
         
            +
             
     | 
| 104 | 
         
            +
             
     | 
| 105 | 
         
            +
            def create_preview(parent: ctk.CTkToplevel) -> ctk.CTkToplevel:
         
     | 
| 106 | 
         
            +
                global preview_label, preview_slider
         
     | 
| 107 | 
         
            +
             
     | 
| 108 | 
         
            +
                preview = ctk.CTkToplevel(parent)
         
     | 
| 109 | 
         
            +
                preview.withdraw()
         
     | 
| 110 | 
         
            +
                preview.title('Preview')
         
     | 
| 111 | 
         
            +
                preview.configure()
         
     | 
| 112 | 
         
            +
                preview.protocol('WM_DELETE_WINDOW', lambda: toggle_preview())
         
     | 
| 113 | 
         
            +
                preview.resizable(width=False, height=False)
         
     | 
| 114 | 
         
            +
             
     | 
| 115 | 
         
            +
                preview_label = ctk.CTkLabel(preview, text=None)
         
     | 
| 116 | 
         
            +
                preview_label.pack(fill='both', expand=True)
         
     | 
| 117 | 
         
            +
             
     | 
| 118 | 
         
            +
                preview_slider = ctk.CTkSlider(preview, from_=0, to=0, command=lambda frame_value: update_preview(frame_value))
         
     | 
| 119 | 
         
            +
             
     | 
| 120 | 
         
            +
                return preview
         
     | 
| 121 | 
         
            +
             
     | 
| 122 | 
         
            +
             
     | 
| 123 | 
         
            +
            def update_status(text: str) -> None:
         
     | 
| 124 | 
         
            +
                status_label.configure(text=text)
         
     | 
| 125 | 
         
            +
                ROOT.update()
         
     | 
| 126 | 
         
            +
             
     | 
| 127 | 
         
            +
             
     | 
| 128 | 
         
            +
            def select_source_path() -> None:
         
     | 
| 129 | 
         
            +
                global RECENT_DIRECTORY_SOURCE
         
     | 
| 130 | 
         
            +
             
     | 
| 131 | 
         
            +
                PREVIEW.withdraw()
         
     | 
| 132 | 
         
            +
                source_path = ctk.filedialog.askopenfilename(title='select an source image', initialdir=RECENT_DIRECTORY_SOURCE)
         
     | 
| 133 | 
         
            +
                if is_image(source_path):
         
     | 
| 134 | 
         
            +
                    roop.globals.source_path = source_path
         
     | 
| 135 | 
         
            +
                    RECENT_DIRECTORY_SOURCE = os.path.dirname(roop.globals.source_path)
         
     | 
| 136 | 
         
            +
                    image = render_image_preview(roop.globals.source_path, (200, 200))
         
     | 
| 137 | 
         
            +
                    source_label.configure(image=image)
         
     | 
| 138 | 
         
            +
                else:
         
     | 
| 139 | 
         
            +
                    roop.globals.source_path = None
         
     | 
| 140 | 
         
            +
                    source_label.configure(image=None)
         
     | 
| 141 | 
         
            +
             
     | 
| 142 | 
         
            +
             
     | 
| 143 | 
         
            +
            def select_target_path() -> None:
         
     | 
| 144 | 
         
            +
                global RECENT_DIRECTORY_TARGET
         
     | 
| 145 | 
         
            +
             
     | 
| 146 | 
         
            +
                PREVIEW.withdraw()
         
     | 
| 147 | 
         
            +
                target_path = ctk.filedialog.askopenfilename(title='select an target image or video', initialdir=RECENT_DIRECTORY_TARGET)
         
     | 
| 148 | 
         
            +
                if is_image(target_path):
         
     | 
| 149 | 
         
            +
                    roop.globals.target_path = target_path
         
     | 
| 150 | 
         
            +
                    RECENT_DIRECTORY_TARGET = os.path.dirname(roop.globals.target_path)
         
     | 
| 151 | 
         
            +
                    image = render_image_preview(roop.globals.target_path, (200, 200))
         
     | 
| 152 | 
         
            +
                    target_label.configure(image=image)
         
     | 
| 153 | 
         
            +
                elif is_video(target_path):
         
     | 
| 154 | 
         
            +
                    roop.globals.target_path = target_path
         
     | 
| 155 | 
         
            +
                    RECENT_DIRECTORY_TARGET = os.path.dirname(roop.globals.target_path)
         
     | 
| 156 | 
         
            +
                    video_frame = render_video_preview(target_path, (200, 200))
         
     | 
| 157 | 
         
            +
                    target_label.configure(image=video_frame)
         
     | 
| 158 | 
         
            +
                else:
         
     | 
| 159 | 
         
            +
                    roop.globals.target_path = None
         
     | 
| 160 | 
         
            +
                    target_label.configure(image=None)
         
     | 
| 161 | 
         
            +
             
     | 
| 162 | 
         
            +
             
     | 
| 163 | 
         
            +
            def select_output_path(start: Callable[[], None]) -> None:
         
     | 
| 164 | 
         
            +
                global RECENT_DIRECTORY_OUTPUT
         
     | 
| 165 | 
         
            +
             
     | 
| 166 | 
         
            +
                if is_image(roop.globals.target_path):
         
     | 
| 167 | 
         
            +
                    output_path = ctk.filedialog.asksaveasfilename(title='save image output file', defaultextension='.png', initialfile='output.png', initialdir=RECENT_DIRECTORY_OUTPUT)
         
     | 
| 168 | 
         
            +
                elif is_video(roop.globals.target_path):
         
169 +         output_path = ctk.filedialog.asksaveasfilename(title='save video output file', defaultextension='.mp4', initialfile='output.mp4', initialdir=RECENT_DIRECTORY_OUTPUT)
170 +     else:
171 +         output_path = None
172 +     if output_path:
173 +         roop.globals.output_path = output_path
174 +         RECENT_DIRECTORY_OUTPUT = os.path.dirname(roop.globals.output_path)
175 +         start()
176 + 
177 + 
178 + def render_image_preview(image_path: str, size: Tuple[int, int]) -> ctk.CTkImage:
179 +     image = Image.open(image_path)
180 +     if size:
181 +         image = ImageOps.fit(image, size, Image.LANCZOS)
182 +     return ctk.CTkImage(image, size=image.size)
183 + 
184 + 
185 + def render_video_preview(video_path: str, size: Tuple[int, int], frame_number: int = 0) -> ctk.CTkImage:
186 +     capture = cv2.VideoCapture(video_path)
187 +     if frame_number:
188 +         capture.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
189 +     has_frame, frame = capture.read()
190 +     if has_frame:
191 +         image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
192 +         if size:
193 +             image = ImageOps.fit(image, size, Image.LANCZOS)
194 +         return ctk.CTkImage(image, size=image.size)
195 +     capture.release()
196 +     cv2.destroyAllWindows()
197 + 
198 + 
199 + def toggle_preview() -> None:
200 +     if PREVIEW.state() == 'normal':
201 +         PREVIEW.withdraw()
202 +     elif roop.globals.source_path and roop.globals.target_path:
203 +         init_preview()
204 +         update_preview()
205 +         PREVIEW.deiconify()
206 + 
207 + 
208 + def init_preview() -> None:
209 +     if is_image(roop.globals.target_path):
210 +         preview_slider.pack_forget()
211 +     if is_video(roop.globals.target_path):
212 +         video_frame_total = get_video_frame_total(roop.globals.target_path)
213 +         preview_slider.configure(to=video_frame_total)
214 +         preview_slider.pack(fill='x')
215 +         preview_slider.set(0)
216 + 
217 + 
218 + def update_preview(frame_number: int = 0) -> None:
219 +     if roop.globals.source_path and roop.globals.target_path:
220 +         temp_frame = get_video_frame(roop.globals.target_path, frame_number)
221 +         if predict_frame(temp_frame):
222 +             quit()
223 +         for frame_processor in get_frame_processors_modules(roop.globals.frame_processors):
224 +             temp_frame = frame_processor.process_frame(
225 +                 get_one_face(cv2.imread(roop.globals.source_path)),
226 +                 temp_frame
227 +             )
228 +         image = Image.fromarray(cv2.cvtColor(temp_frame, cv2.COLOR_BGR2RGB))
229 +         image = ImageOps.contain(image, (PREVIEW_MAX_WIDTH, PREVIEW_MAX_HEIGHT), Image.LANCZOS)
230 +         image = ctk.CTkImage(image, size=image.size)
231 +         preview_label.configure(image=image)
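Note that render_video_preview above returns from inside the `if has_frame:` branch, so `capture.release()` and `cv2.destroyAllWindows()` are skipped whenever a frame is read successfully. A minimal sketch of a leak-free variant, for reference only; the function name and the Optional return type are illustrative and not part of the uploaded file:

    from typing import Optional, Tuple

    import cv2
    import customtkinter as ctk
    from PIL import Image, ImageOps

    def render_video_preview_safe(video_path: str, size: Optional[Tuple[int, int]] = None, frame_number: int = 0) -> Optional[ctk.CTkImage]:
        # Open the capture and seek to the requested frame before reading.
        capture = cv2.VideoCapture(video_path)
        if frame_number:
            capture.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
        has_frame, frame = capture.read()
        # Release the handle before returning so it is not leaked on success.
        capture.release()
        if not has_frame:
            return None
        image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        if size:
            image = ImageOps.fit(image, size, Image.LANCZOS)
        return ctk.CTkImage(image, size=image.size)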
    	
roop/utilities.py
ADDED
@@ -0,0 +1,141 @@
  1 + import glob
  2 + import mimetypes
  3 + import os
  4 + import platform
  5 + import shutil
  6 + import ssl
  7 + import subprocess
  8 + import urllib
  9 + from pathlib import Path
 10 + from typing import List, Any
 11 + from tqdm import tqdm
 12 + 
 13 + import roop.globals
 14 + 
 15 + TEMP_FILE = 'temp.mp4'
 16 + TEMP_DIRECTORY = 'temp'
 17 + 
 18 + # monkey patch ssl for mac
 19 + if platform.system().lower() == 'darwin':
 20 +     ssl._create_default_https_context = ssl._create_unverified_context
 21 + 
 22 + 
 23 + def run_ffmpeg(args: List[str]) -> bool:
 24 +     commands = ['ffmpeg', '-hide_banner', '-hwaccel', 'auto', '-loglevel', roop.globals.log_level]
 25 +     commands.extend(args)
 26 +     try:
 27 +         subprocess.check_output(commands, stderr=subprocess.STDOUT)
 28 +         return True
 29 +     except Exception:
 30 +         pass
 31 +     return False
 32 + 
 33 + 
 34 + def detect_fps(target_path: str) -> float:
 35 +     command = ['ffprobe', '-v', 'error', '-select_streams', 'v:0', '-show_entries', 'stream=r_frame_rate', '-of', 'default=noprint_wrappers=1:nokey=1', target_path]
 36 +     output = subprocess.check_output(command).decode().strip().split('/')
 37 +     try:
 38 +         numerator, denominator = map(int, output)
 39 +         return numerator / denominator
 40 +     except Exception:
 41 +         pass
 42 +     return 30.0
 43 + 
 44 + 
 45 + def extract_frames(target_path: str) -> None:
 46 +     temp_directory_path = get_temp_directory_path(target_path)
 47 +     run_ffmpeg(['-i', target_path, '-pix_fmt', 'rgb24', os.path.join(temp_directory_path, '%04d.png')])
 48 + 
 49 + 
 50 + def create_video(target_path: str, fps: float = 30.0) -> None:
 51 +     temp_output_path = get_temp_output_path(target_path)
 52 +     temp_directory_path = get_temp_directory_path(target_path)
 53 +     run_ffmpeg(['-r', str(fps), '-i', os.path.join(temp_directory_path, '%04d.png'), '-c:v', roop.globals.video_encoder, '-crf', str(roop.globals.video_quality), '-pix_fmt', 'yuv420p', '-vf', 'colorspace=bt709:iall=bt601-6-625:fast=1', '-y', temp_output_path])
 54 + 
 55 + 
 56 + def restore_audio(target_path: str, output_path: str) -> None:
 57 +     temp_output_path = get_temp_output_path(target_path)
 58 +     done = run_ffmpeg(['-i', temp_output_path, '-i', target_path, '-c:v', 'copy', '-map', '0:v:0', '-map', '1:a:0', '-y', output_path])
 59 +     if not done:
 60 +         move_temp(target_path, output_path)
 61 + 
 62 + 
 63 + def get_temp_frame_paths(target_path: str) -> List[str]:
 64 +     temp_directory_path = get_temp_directory_path(target_path)
 65 +     return glob.glob((os.path.join(glob.escape(temp_directory_path), '*.png')))
 66 + 
 67 + 
 68 + def get_temp_directory_path(target_path: str) -> str:
 69 +     target_name, _ = os.path.splitext(os.path.basename(target_path))
 70 +     target_directory_path = os.path.dirname(target_path)
 71 +     return os.path.join(target_directory_path, TEMP_DIRECTORY, target_name)
 72 + 
 73 + 
 74 + def get_temp_output_path(target_path: str) -> str:
 75 +     temp_directory_path = get_temp_directory_path(target_path)
 76 +     return os.path.join(temp_directory_path, TEMP_FILE)
 77 + 
 78 + 
 79 + def normalize_output_path(source_path: str, target_path: str, output_path: str) -> Any:
 80 +     if source_path and target_path:
 81 +         source_name, _ = os.path.splitext(os.path.basename(source_path))
 82 +         target_name, target_extension = os.path.splitext(os.path.basename(target_path))
 83 +         if os.path.isdir(output_path):
 84 +             return os.path.join(output_path, source_name + '-' + target_name + target_extension)
 85 +     return output_path
 86 + 
 87 + 
 88 + def create_temp(target_path: str) -> None:
 89 +     temp_directory_path = get_temp_directory_path(target_path)
 90 +     Path(temp_directory_path).mkdir(parents=True, exist_ok=True)
 91 + 
 92 + 
 93 + def move_temp(target_path: str, output_path: str) -> None:
 94 +     temp_output_path = get_temp_output_path(target_path)
 95 +     if os.path.isfile(temp_output_path):
 96 +         if os.path.isfile(output_path):
 97 +             os.remove(output_path)
 98 +         shutil.move(temp_output_path, output_path)
 99 + 
100 + 
101 + def clean_temp(target_path: str) -> None:
102 +     temp_directory_path = get_temp_directory_path(target_path)
103 +     parent_directory_path = os.path.dirname(temp_directory_path)
104 +     if not roop.globals.keep_frames and os.path.isdir(temp_directory_path):
105 +         shutil.rmtree(temp_directory_path)
106 +     if os.path.exists(parent_directory_path) and not os.listdir(parent_directory_path):
107 +         os.rmdir(parent_directory_path)
108 + 
109 + 
110 + def has_image_extension(image_path: str) -> bool:
111 +     return image_path.lower().endswith(('png', 'jpg', 'jpeg', 'webp'))
112 + 
113 + 
114 + def is_image(image_path: str) -> bool:
115 +     if image_path and os.path.isfile(image_path):
116 +         mimetype, _ = mimetypes.guess_type(image_path)
117 +         return bool(mimetype and mimetype.startswith('image/'))
118 +     return False
119 + 
120 + 
121 + def is_video(video_path: str) -> bool:
122 +     if video_path and os.path.isfile(video_path):
123 +         mimetype, _ = mimetypes.guess_type(video_path)
124 +         return bool(mimetype and mimetype.startswith('video/'))
125 +     return False
126 + 
127 + 
128 + def conditional_download(download_directory_path: str, urls: List[str]) -> None:
129 +     if not os.path.exists(download_directory_path):
130 +         os.makedirs(download_directory_path)
131 +     for url in urls:
132 +         download_file_path = os.path.join(download_directory_path, os.path.basename(url))
133 +         if not os.path.exists(download_file_path):
134 +             request = urllib.request.urlopen(url) # type: ignore[attr-defined]
135 +             total = int(request.headers.get('Content-Length', 0))
136 +             with tqdm(total=total, desc='Downloading', unit='B', unit_scale=True, unit_divisor=1024) as progress:
137 +                 urllib.request.urlretrieve(url, download_file_path, reporthook=lambda count, block_size, total_size: progress.update(block_size)) # type: ignore[attr-defined]
138 + 
139 + 
140 + def resolve_relative_path(path: str) -> str:
141 +     return os.path.abspath(os.path.join(os.path.dirname(__file__), path))
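For context, a minimal sketch of how these helpers chain together for a video target, following the extract, process, re-encode, restore-audio flow they imply. Paths are placeholders, the per-frame processing step is elided, and it assumes roop.globals already carries its defaults (log_level, video_encoder, video_quality, keep_frames); it is not an entry point defined in this upload:

    from roop.utilities import (
        clean_temp, create_temp, create_video, detect_fps, extract_frames,
        get_temp_frame_paths, is_video, normalize_output_path, restore_audio,
    )

    source_path = '/path/to/source.jpg'   # placeholder
    target_path = '/path/to/target.mp4'   # placeholder
    # A directory output path is resolved to '<source>-<target><ext>' inside it.
    output_path = normalize_output_path(source_path, target_path, '/path/to/output')

    if is_video(target_path):
        create_temp(target_path)                                 # <target dir>/temp/<target name>/
        extract_frames(target_path)                              # dump frames as %04d.png via ffmpeg
        frame_paths = get_temp_frame_paths(target_path)
        # ... frame processors would rewrite the PNGs in place here ...
        create_video(target_path, fps=detect_fps(target_path))   # re-encode frames into temp.mp4
        restore_audio(target_path, output_path)                  # mux original audio, or move_temp on failure
        clean_temp(target_path)                                  # drop the frame directory unless keep_frames is set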