Update app.py
app.py CHANGED
@@ -40,8 +40,8 @@ def generate(prompt,history):
     buf = ""
     for response in stream:
         output += response.token.text
-        yield [(prompt,output)]
-        yield [(prompt,output)]
+        yield [(prompt,output)]
+        yield [(prompt,output)]
 
 def load_mod(model):
     yield f"Loading: {model}"
@@ -50,13 +50,13 @@ def load_mod(model):
 def tts(inp,names,length,noise,width,sen_pause):
     yield from pp.stream_tts(inp[-1][-1],names,length,noise,width,sen_pause)
 with gr.Blocks() as iface:
-    aud=gr.Audio(streaming=True, autoplay=True)
     chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, layout="panel")
-    prompt = gr.Textbox()
+    prompt = gr.Textbox(label="Prompt")
     with gr.Row():
         submit_b = gr.Button()
         stop_b = gr.Button("Stop")
         clear = gr.ClearButton([chatbot,prompt,aud])
+    aud=gr.Audio(streaming=True, autoplay=True)
     with gr.Accordion("Voice Controls",open=False):
         msg = gr.HTML("""""")
         names=gr.Dropdown(label="Voice", choices=pp.key_list,value="en_US-ryan-high")
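The hunks above show only the Blocks layout; the event wiring that streams generate into the Chatbot and then hands the last reply to tts and the Audio component is outside this diff. The following is a hypothetical sketch of how such a layout and wiring could fit together, with placeholder generate and tts bodies, a simplified tts signature, an assumed click/then/cancel chain, and a single hard-coded voice; only the component names and keyword arguments visible in the diff are taken from the Space.

# Hypothetical sketch, not the Space's app.py: placeholder bodies, simplified
# signatures, and assumed event wiring.
import numpy as np
import gradio as gr

def generate(prompt, history):
    # stand-in for the Space's streaming inference call
    output = ""
    for tok in ("streamed ", "reply"):
        output += tok
        yield [(prompt, output)]

def tts(chat, voice):
    # the Space does: yield from pp.stream_tts(chat[-1][-1], names, length, noise, width, sen_pause)
    # here we stream one short silent chunk so the sketch runs without piper
    yield (22050, np.zeros(2205, dtype=np.int16))

with gr.Blocks() as iface:
    chatbot = gr.Chatbot(show_label=False, show_copy_button=True, layout="panel")
    prompt = gr.Textbox(label="Prompt")
    with gr.Row():
        submit_b = gr.Button("Submit")
        stop_b = gr.Button("Stop")
    # aud is created before the ClearButton that lists it as clearable
    aud = gr.Audio(streaming=True, autoplay=True)
    clear = gr.ClearButton([chatbot, prompt, aud])
    with gr.Accordion("Voice Controls", open=False):
        names = gr.Dropdown(label="Voice", choices=["en_US-ryan-high"], value="en_US-ryan-high")

    # chat first, then speak the last reply; Stop cancels the running chain
    go = submit_b.click(generate, [prompt, chatbot], chatbot)
    go.then(tts, [chatbot, names], aud)
    stop_b.click(None, None, None, cancels=[go])

iface.launch()

Creating aud before the ClearButton avoids a NameError if, as in this hunk, no earlier aud definition exists, and the Stop button cancels the generate/tts chain through cancels=[go].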