pragnakalp commited on
Commit
81cf309
·
1 Parent(s): 2ea8134

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -24
app.py CHANGED
@@ -34,33 +34,25 @@ def one_shot(image,input_text,gender):
34
  sound = AudioSegment.from_file(f.name, format="mp3")
35
  sound.export("/content/audio.wav", format="wav")
36
  waveform, sample_rate = torchaudio.load("/content/audio.wav")
37
-
38
- torchaudio.save("/content/audio.wav", waveform, sample_rate, encoding="PCM_S", bits_per_sample=16)
39
- image = Image.open(image_in)
40
- image = pad_image(image)
41
- image.save("/content/image_pre.png")
42
- pocketsphinx_run = subprocess.run(['pocketsphinx', '-phone_align', 'yes', 'single', '/content/audio.wav'], check=True, capture_output=True)
43
- jq_run = subprocess.run(['jq', '[.w[]|{word: (.t | ascii_upcase | sub("<S>"; "sil") | sub("<SIL>"; "sil") | sub("\\\(2\\\)"; "") | sub("\\\(3\\\)"; "") | sub("\\\(4\\\)"; "") | sub("\\\[SPEECH\\\]"; "SIL") | sub("\\\[NOISE\\\]"; "SIL")), phones: [.w[]|{ph: .t | sub("\\\+SPN\\\+"; "SIL") | sub("\\\+NSN\\\+"; "SIL"), bg: (.b*100)|floor, ed: (.b*100+.d*100)|floor}]}]'], input=pocketsphinx_run.stdout, capture_output=True)
44
- with open("test.json", "w") as f:
45
- f.write(jq_run.stdout.decode('utf-8').strip())
46
-
47
- os.system(f"cd /content/one-shot-talking-face && python3 -B test_script.py --img_path /content/image_pre.png --audio_path /content/audio.wav --phoneme_path /content/test.json --save_dir /content/train")
 
 
 
 
48
  return "/content/train/image_pre_audio.mp4"
49
  exit()
50
 
51
- #1. Extract the frames from the video file using PyVideoFramesExtractor
52
- os.system(f"python /content/PyVideoFramesExtractor/extract.py --video=/content/train/image_pre_audio.mp4")
53
-
54
- #2. Improve image quality using GFPGAN on each frame
55
- os.system(f"python /content/GFPGAN/inference_gfpgan.py --upscale 2 -i /content/extracted_frames/ -o /content/video_results --bg_upsampler realesrgan")
56
-
57
- #3. Merge all the frames into one video using imageio
58
- path = '/content/video_results/restored_imgs'
59
- image_folder = os.fsencode(path)
60
- print(image_folder)
61
- filenames = []
62
-
63
- return "/content/audio.wav"
64
 
65
  elif gender == 'Male' or gender == 'male':
66
  print(gender)
 
34
  sound = AudioSegment.from_file(f.name, format="mp3")
35
  sound.export("/content/audio.wav", format="wav")
36
  waveform, sample_rate = torchaudio.load("/content/audio.wav")
37
+
38
+ try:
39
+ torchaudio.save("/content/audio.wav", waveform, sample_rate, encoding="PCM_S", bits_per_sample=16)
40
+ image = Image.open(image_in)
41
+ image = pad_image(image)
42
+ image.save("/content/image_pre.png")
43
+ pocketsphinx_run = subprocess.run(['pocketsphinx', '-phone_align', 'yes', 'single', '/content/audio.wav'], check=True, capture_output=True)
44
+ jq_run = subprocess.run(['jq', '[.w[]|{word: (.t | ascii_upcase | sub("<S>"; "sil") | sub("<SIL>"; "sil") | sub("\\\(2\\\)"; "") | sub("\\\(3\\\)"; "") | sub("\\\(4\\\)"; "") | sub("\\\[SPEECH\\\]"; "SIL") | sub("\\\[NOISE\\\]"; "SIL")), phones: [.w[]|{ph: .t | sub("\\\+SPN\\\+"; "SIL") | sub("\\\+NSN\\\+"; "SIL"), bg: (.b*100)|floor, ed: (.b*100+.d*100)|floor}]}]'], input=pocketsphinx_run.stdout, capture_output=True)
45
+ with open("test.json", "w") as f:
46
+ f.write(jq_run.stdout.decode('utf-8').strip())
47
+
48
+ os.system(f"cd /content/one-shot-talking-face && python3 -B test_script.py --img_path /content/image_pre.png --audio_path /content/audio.wav --phoneme_path /content/test.json --save_dir /content/train")
49
+
50
+ except Exception as e:
51
+ print(e)
52
  return "/content/train/image_pre_audio.mp4"
53
  exit()
54
 
55
+
 
 
 
 
 
 
 
 
 
 
 
 
56
 
57
  elif gender == 'Male' or gender == 'male':
58
  print(gender)