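"""Gradio demo for "One Shot Talking Face from Text": runs Wav2Lip inference
on a bundled sample face image and audio clip, then muxes the resulting video
with the audio track via ffmpeg-python."""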
import gradio as gr
import spaces
import torch
import subprocess
import os
import ffmpeg

# On ZeroGPU Spaces, CUDA is only available inside functions decorated with
# @spaces.GPU, so this tensor still reports 'cpu' at import time.
zero = torch.Tensor([0]).cuda()
print(zero.device) # <-- 'cpu' πŸ€”

# Stock ZeroGPU sanity check from the Spaces docs: inside a @spaces.GPU
# function the same tensor reports 'cuda:0'. Not wired into the UI below.
@spaces.GPU
def greet(n):
    print(zero.device) # <-- 'cuda:0' πŸ€—
    return f"Hello {zero + n} Tensor"

def audio_video():
    """Mux the silent Wav2Lip output video with the driving audio track."""
    print("started =========================")
    input_video = ffmpeg.input('results/result_voice.mp4')
    input_audio = ffmpeg.input('sample_data/sir.mp3')
    # overwrite_output=True replaces any previous result, so no `rm` is needed.
    (
        ffmpeg
        .concat(input_video, input_audio, v=1, a=1)
        .output('results/final_output.mp4')
        .run(overwrite_output=True)
    )
    return "results/final_output.mp4"

def run_inference(input_video, input_audio):
    # NOTE: the demo currently ignores the uploaded inputs and runs Wav2Lip
    # on the bundled sample face image and audio clip.
    command = [
        "python3", "inference.py",
        "--checkpoint_path", "checkpoints/wav2lip_gan.pth",
        "--face", "sample_data/spark.png",
        "--audio", "sample_data/sir.mp3",
    ]
    print("running")
    # Run inference to completion; raise if it fails instead of silently
    # returning a stale result.
    subprocess.run(command, capture_output=True, check=True)
    return audio_video()

def run():
    css = (".gradio-container {background-color: lightgray} "
           "#radio_div {background-color: #FFD8B4; font-size: 40px;}")
    with gr.Blocks(css=css) as demo:
        gr.Markdown("<h1 style='text-align: center;'>One Shot Talking Face from Text</h1><br/><br/>")
        with gr.Group():
            with gr.Row():
                input_video = gr.Video(label="Input Video")
                input_audio = gr.Audio(label="Input Audio")
                video_out = gr.Video(label="Output")
            with gr.Row():
                btn = gr.Button("Generate")

        btn.click(run_inference, inputs=[input_video, input_audio], outputs=[video_out])
        demo.queue()
        demo.launch(server_name="0.0.0.0", server_port=7860)


if __name__ == "__main__":
    run()
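
# Usage (assuming this file is saved as app.py): run `python app.py`,
# then open http://localhost:7860 in a browser.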