guangyil committed on
Commit
af4bca2
·
verified ·
1 Parent(s): d66fc7f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -7
app.py CHANGED
@@ -26,13 +26,13 @@ from huggingface_hub import hf_hub_download
26
 
27
  from infer import load_model, eval_model
28
  from spkr import SpeakerEmbedding
29
-
30
  @spaces.GPU
31
- def dummy_gpu_check():
32
- # Just to let Hugging Face Spaces know this app uses GPU
33
- return torch.cuda.is_available()
34
 
35
- spkr_model = SpeakerEmbedding(device="cuda")
36
  model, tokenizer, tokenizer_voila, model_type = load_model("maitrix-org/Voila-chat", "maitrix-org/Voila-Tokenizer")
37
  default_ref_file = "examples/character_ref_emb_demo.pkl"
38
  default_ref_name = "Homer Simpson"
@@ -63,7 +63,8 @@ def delete_directory(request: gr.Request):
63
  def add_message(history, message):
64
  history.append({"role": "user", "content": {"path": message}})
65
  return history, gr.Audio(value=None), gr.Button(interactive=False)
66
-
 
67
  def call_bot(history, ref_embs, request: gr.Request):
68
  formated_history = {
69
  "instruction": instruction,
@@ -87,7 +88,8 @@ def call_bot(history, ref_embs, request: gr.Request):
87
  history.append({"role": "assistant", "content": {"text": out['text']}})
88
 
89
  return history
90
-
 
91
  def run_tts(text, ref_embs):
92
  formated_history = {
93
  "instruction": "",
 
26
 
27
  from infer import load_model, eval_model
28
  from spkr import SpeakerEmbedding
29
+
30
  @spaces.GPU
31
+ def spkr_model_init():
32
+ spkr_model = SpeakerEmbedding(device="cuda")
33
+ return spkr_model
34
 
35
+ spkr_model = spkr_model_init()
36
  model, tokenizer, tokenizer_voila, model_type = load_model("maitrix-org/Voila-chat", "maitrix-org/Voila-Tokenizer")
37
  default_ref_file = "examples/character_ref_emb_demo.pkl"
38
  default_ref_name = "Homer Simpson"
 
63
  def add_message(history, message):
64
  history.append({"role": "user", "content": {"path": message}})
65
  return history, gr.Audio(value=None), gr.Button(interactive=False)
66
+
67
+ @spaces.GPU
68
  def call_bot(history, ref_embs, request: gr.Request):
69
  formated_history = {
70
  "instruction": instruction,
 
88
  history.append({"role": "assistant", "content": {"text": out['text']}})
89
 
90
  return history
91
+
92
+ @spaces.GPU
93
  def run_tts(text, ref_embs):
94
  formated_history = {
95
  "instruction": "",