pragnakalp commited on
Commit
997e07d
·
1 Parent(s): 8465d71

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -13
app.py CHANGED
@@ -23,7 +23,34 @@ import ffmpeg
23
 
24
  block = gr.Blocks()
25
 
26
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
 
28
  def one_shot(image,input_text,gender):
29
  if gender == 'Female' or gender == 'female':
@@ -33,19 +60,8 @@ def one_shot(image,input_text,gender):
33
  f.seek(0)
34
  sound = AudioSegment.from_file(f.name, format="mp3")
35
  sound.export("/content/audio.wav", format="wav")
36
- waveform, sample_rate = torchaudio.load("/content/audio.wav")
37
- torchaudio.save("/content/audio.wav", waveform, sample_rate, encoding="PCM_S", bits_per_sample=16)
38
- image = Image.open(image_in)
39
- image = pad_image(image)
40
- image.save("/content/image_pre.png")
41
- pocketsphinx_run = subprocess.run(['pocketsphinx', '-phone_align', 'yes', 'single', '/content/audio.wav'], check=True, capture_output=True)
42
- jq_run = subprocess.run(['jq', '[.w[]|{word: (.t | ascii_upcase | sub("<S>"; "sil") | sub("<SIL>"; "sil") | sub("\\\(2\\\)"; "") | sub("\\\(3\\\)"; "") | sub("\\\(4\\\)"; "") | sub("\\\[SPEECH\\\]"; "SIL") | sub("\\\[NOISE\\\]"; "SIL")), phones: [.w[]|{ph: .t | sub("\\\+SPN\\\+"; "SIL") | sub("\\\+NSN\\\+"; "SIL"), bg: (.b*100)|floor, ed: (.b*100+.d*100)|floor}]}]'], input=pocketsphinx_run.stdout, capture_output=True)
43
- with open("test.json", "w") as f:
44
- f.write(jq_run.stdout.decode('utf-8').strip())
45
-
46
- os.system(f"cd /content/one-shot-talking-face && python3 -B test_script.py --img_path /content/image_pre.png --audio_path /content/audio.wav --phoneme_path /content/test.json --save_dir /content/train")
47
 
48
- return "/content/image_pre.png"
49
 
50
 
51
  elif gender == 'Male' or gender == 'male':
 
23
 
24
  block = gr.Blocks()
25
 
26
def pad_image(image):
    """Pad *image* with black borders so it becomes square.

    The original picture is centered on the new canvas; if it is already
    square it is returned unchanged (same object, no copy).
    """
    width, height = image.size
    if width == height:
        return image
    # One canvas sized by the longer edge handles both orientations.
    side = max(width, height)
    canvas = Image.new(image.mode, (side, side), (0, 0, 0))
    offset = ((side - width) // 2, (side - height) // 2)
    canvas.paste(image, offset)
    return canvas
38
+
39
def calculate(image_in, audio_in):
    """Run the one-shot-talking-face pipeline on one image/audio pair.

    Parameters:
        image_in: path to the input portrait image.
        audio_in: path to the input audio file (any torchaudio-readable format).

    Returns:
        Path of the rendered video, "/content/train/image_audio.mp4".

    Side effects: writes /content/audio.wav, /content/image.png and
    /content/test.json, then shells out to test_script.py.
    """
    # Re-encode the audio as 16-bit signed PCM WAV, downmixed to mono,
    # which is the format the downstream model expects.
    waveform, sample_rate = torchaudio.load(audio_in)
    waveform = torch.mean(waveform, dim=0, keepdim=True)
    torchaudio.save("/content/audio.wav", waveform, sample_rate, encoding="PCM_S", bits_per_sample=16)

    # Square-pad the portrait and save it at the absolute path the
    # command below reads (was a relative "image.png", which only worked
    # when the current working directory happened to be /content).
    image = Image.open(image_in)
    image = pad_image(image)
    image.save("/content/image.png")

    # Phoneme-align the audio, then reshape pocketsphinx's JSON with jq
    # into the {word, phones:[{ph, bg, ed}]} layout test_script.py wants.
    pocketsphinx_run = subprocess.run(['pocketsphinx', '-phone_align', 'yes', 'single', '/content/audio.wav'], check=True, capture_output=True)
    jq_run = subprocess.run(['jq', '[.w[]|{word: (.t | ascii_upcase | sub("<S>"; "sil") | sub("<SIL>"; "sil") | sub("\\\(2\\\)"; "") | sub("\\\(3\\\)"; "") | sub("\\\(4\\\)"; "") | sub("\\\[SPEECH\\\]"; "SIL") | sub("\\\[NOISE\\\]"; "SIL")), phones: [.w[]|{ph: .t | sub("\\\+SPN\\\+"; "SIL") | sub("\\\+NSN\\\+"; "SIL"), bg: (.b*100)|floor, ed: (.b*100+.d*100)|floor}]}]'], input=pocketsphinx_run.stdout, capture_output=True)
    # Write the alignment where the command below reads it (was a
    # relative "test.json" vs the absolute /content/test.json argument).
    with open("/content/test.json", "w") as f:
        f.write(jq_run.stdout.decode('utf-8').strip())
    # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    os.system(f"cd /content/one-shot-talking-face && python3 -B test_script.py --img_path /content/image.png --audio_path /content/audio.wav --phoneme_path /content/test.json --save_dir /content/train")
    return "/content/train/image_audio.mp4"
54
 
55
  def one_shot(image,input_text,gender):
56
  if gender == 'Female' or gender == 'female':
 
60
  f.seek(0)
61
  sound = AudioSegment.from_file(f.name, format="mp3")
62
  sound.export("/content/audio.wav", format="wav")
63
+ calculate(image,"/content/audio.wav")
 
 
 
 
 
 
 
 
 
 
64
 
 
65
 
66
 
67
  elif gender == 'Male' or gender == 'male':