Commit f9d7c5e
Parent(s): 293dca7
Update app.py
app.py CHANGED
@@ -44,19 +44,10 @@ def one_shot(image,input_text,gender):
         jq_run = subprocess.run(['jq', '[.w[]|{word: (.t | ascii_upcase | sub("<S>"; "sil") | sub("<SIL>"; "sil") | sub("\\\(2\\\)"; "") | sub("\\\(3\\\)"; "") | sub("\\\(4\\\)"; "") | sub("\\\[SPEECH\\\]"; "SIL") | sub("\\\[NOISE\\\]"; "SIL")), phones: [.w[]|{ph: .t | sub("\\\+SPN\\\+"; "SIL") | sub("\\\+NSN\\\+"; "SIL"), bg: (.b*100)|floor, ed: (.b*100+.d*100)|floor}]}]'], input=pocketsphinx_run.stdout, capture_output=True)
         with open("test.json", "w") as f:
             f.write(jq_run.stdout.decode('utf-8').strip())
-        import json
-
-        with open('test.json') as user_file:
-            file_contents = user_file.read()
-
-
 
-        parsed_json = json.loads(file_contents)
-        return parsed_json
-        exit()
         os.system(f"cd /content/one-shot-talking-face && python3 -B test_script.py --img_path /content/image_pre.png --audio_path /content/audio.wav --phoneme_path /content/test.json --save_dir /content/train")
 
-
+        return "/content/train/image_audio.mp4"
 
 
     elif gender == 'Male' or gender == 'male':
@@ -99,7 +90,7 @@ def run():
         # audio_in = gr.Audio(show_label=False, type='filepath')
         input_text=gr.Textbox(lines=3, value="Hello How are you?", label="Input Text")
         gender = gr.Radio(["Female","Male"],value="Female",label="Gender")
-        video_out = gr.
+        video_out = gr.Video(label="output")
         # video_out = gr.Video(show_label=False)
         with gr.Row().style(equal_height=True):
             btn = gr.Button("Generate")
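Net effect of the commit: in one_shot the leftover debugging block (re-reading test.json, return parsed_json, exit()) is removed, so execution now reaches the test_script.py render and the function returns the generated clip at /content/train/image_audio.mp4; in run() the truncated "video_out = gr." line is completed as gr.Video(label="output"). Below is a minimal sketch of how these pieces are expected to fit together; the image input component and the btn.click() wiring are assumptions, since they are not shown in this diff.

import gradio as gr

def one_shot(image, input_text, gender):
    # Only the tail of the Female branch is visible in the diff: pocketsphinx output
    # is piped through jq into /content/test.json, test_script.py renders the clip,
    # and the function now returns the path of the generated video instead of
    # exiting early. The preceding TTS/alignment steps are elided here.
    ...
    return "/content/train/image_audio.mp4"

def run():
    with gr.Blocks() as demo:
        # Component names other than those visible in the diff are assumptions.
        image_in = gr.Image(type="filepath", label="Image")
        input_text = gr.Textbox(lines=3, value="Hello How are you?", label="Input Text")
        gender = gr.Radio(["Female", "Male"], value="Female", label="Gender")
        video_out = gr.Video(label="output")  # output component added in this commit
        with gr.Row():
            btn = gr.Button("Generate")
        # Assumed event wiring: the file path returned by one_shot is what video_out plays.
        btn.click(one_shot, inputs=[image_in, input_text, gender], outputs=video_out)
    demo.launch()

if __name__ == "__main__":
    run()

With wiring like this, gr.Video receives the mp4 path produced under /content/train, which appears to be why the old early return/exit() (hit before os.system ever ran) left the output empty.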