pragnakalp committed on
Commit
2034a10
·
1 Parent(s): 997e07d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -48,7 +48,7 @@ def calculate(image_in, audio_in):
48
  jq_run = subprocess.run(['jq', '[.w[]|{word: (.t | ascii_upcase | sub("<S>"; "sil") | sub("<SIL>"; "sil") | sub("\\\(2\\\)"; "") | sub("\\\(3\\\)"; "") | sub("\\\(4\\\)"; "") | sub("\\\[SPEECH\\\]"; "SIL") | sub("\\\[NOISE\\\]"; "SIL")), phones: [.w[]|{ph: .t | sub("\\\+SPN\\\+"; "SIL") | sub("\\\+NSN\\\+"; "SIL"), bg: (.b*100)|floor, ed: (.b*100+.d*100)|floor}]}]'], input=pocketsphinx_run.stdout, capture_output=True)
49
  with open("test.json", "w") as f:
50
  f.write(jq_run.stdout.decode('utf-8').strip())
51
- # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
52
  os.system(f"cd /content/one-shot-talking-face && python3 -B test_script.py --img_path /content/image.png --audio_path /content/audio.wav --phoneme_path /content/test.json --save_dir /content/train")
53
  return "/content/train/image_audio.mp4"
54
 
@@ -60,7 +60,8 @@ def one_shot(image,input_text,gender):
60
  f.seek(0)
61
  sound = AudioSegment.from_file(f.name, format="mp3")
62
  sound.export("/content/audio.wav", format="wav")
63
- calculate(image,"/content/audio.wav")
 
64
 
65
 
66
 
@@ -74,7 +75,6 @@ def one_shot(image,input_text,gender):
74
  model = models[0].cuda()
75
  TTSHubInterface.update_cfg_with_data_cfg(cfg, task.data_cfg)
76
  generator = task.build_generator([model], cfg)
77
- # next(model.parameters()).device
78
 
79
  sample = TTSHubInterface.get_model_input(task, input_text)
80
  sample["net_input"]["src_tokens"] = sample["net_input"]["src_tokens"].cuda()
 
48
  jq_run = subprocess.run(['jq', '[.w[]|{word: (.t | ascii_upcase | sub("<S>"; "sil") | sub("<SIL>"; "sil") | sub("\\\(2\\\)"; "") | sub("\\\(3\\\)"; "") | sub("\\\(4\\\)"; "") | sub("\\\[SPEECH\\\]"; "SIL") | sub("\\\[NOISE\\\]"; "SIL")), phones: [.w[]|{ph: .t | sub("\\\+SPN\\\+"; "SIL") | sub("\\\+NSN\\\+"; "SIL"), bg: (.b*100)|floor, ed: (.b*100+.d*100)|floor}]}]'], input=pocketsphinx_run.stdout, capture_output=True)
49
  with open("test.json", "w") as f:
50
  f.write(jq_run.stdout.decode('utf-8').strip())
51
+
52
  os.system(f"cd /content/one-shot-talking-face && python3 -B test_script.py --img_path /content/image.png --audio_path /content/audio.wav --phoneme_path /content/test.json --save_dir /content/train")
53
  return "/content/train/image_audio.mp4"
54
 
 
60
  f.seek(0)
61
  sound = AudioSegment.from_file(f.name, format="mp3")
62
  sound.export("/content/audio.wav", format="wav")
63
+ audio_in="/content/audio.wav"
64
+ calculate(image,audio_in)
65
 
66
 
67
 
 
75
  model = models[0].cuda()
76
  TTSHubInterface.update_cfg_with_data_cfg(cfg, task.data_cfg)
77
  generator = task.build_generator([model], cfg)
 
78
 
79
  sample = TTSHubInterface.get_model_input(task, input_text)
80
  sample["net_input"]["src_tokens"] = sample["net_input"]["src_tokens"].cuda()