broadfield committed
Commit 0064343 · verified · 1 Parent(s): 271f307

Update app.py

Files changed (1): app.py +27 -35
app.py CHANGED
@@ -2,6 +2,10 @@ from huggingface_hub import InferenceClient
 import gradio as gr
 import random
 import prompts
+from pypipertts import PyPiper
+pp=PyPiper()
+pp.load_mod(instr="en_US-joe-medium")
+
 client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
 
 def format_prompt(message, history):
@@ -33,39 +37,27 @@ def generate(prompt,history,max_new_tokens,seed):
 
     for response in stream:
         output += response.token.text
-        yield output
+        #yield output
+        yield from pp.stream_ttx(output)
 
-additional_inputs = [
-    gr.Slider(
-        label="Max new tokens",
-        value=4096,
-        minimum=10,
-        maximum=1048*10,
-        step=64,
-        interactive=True,
-        info="The maximum numbers of words the chatbot will return",
-    ),
-
-    gr.Slider(
-        label="Seed",
-        value=random.randint(1,9999999999999999),
-        minimum=0,
-        maximum=9999999999999999,
-        step=64,
-        interactive=True,
-        info="Each seed produces a different output to a single prompt",
-    ),
-]
-examples=[["Write a heavy metal rock song about horses (1000 words)"],
-    ["Write a really long song about an elephant love story. The elephants have been together for a long time. (3000 words)"],
-    ["Write a slammin rap song about about a gangster fising trip."],
-    ["Use the same lyrics as above, but add more words to each verse"],
-]
-gr.ChatInterface(
-    fn=generate,
-    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, layout="panel"),
-    title="Song Writer",
-    additional_inputs=additional_inputs,
-    examples=examples,
-    concurrency_limit=20,
-).launch(show_api=False)
+with gr.Blocks() as iface:
+    gr.HTML("""""")
+    aud=gr.Audio(streaming=True,autoplay=True)
+    #chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
+    chatbot=gr.Chatbot()
+    msg = gr.Textbox()
+    with gr.Row():
+        submit_b = gr.Button()
+        stop_b = gr.Button("Stop")
+        clear = gr.ClearButton([msg, chatbot])
+    sumbox=gr.Textbox("Summary", max_lines=100)
+    with gr.Column():
+        sum_out_box=gr.JSON(label="Summaries")
+        hist_out_box=gr.JSON(label="History")
+
+    #sub_b = submit_b.click(generate, [msg,chatbot],[msg,chatbot,sumbox,sum_out_box,hist_out_box])
+    #sub_e = msg.submit(generate, [msg, chatbot], [msg, chatbot,sumbox,sum_out_box,hist_out_box])
+    sub_b = submit_b.click(generate, [msg,chatbot],aud)
+    sub_e = msg.submit(generate, [msg, chatbot], aud)
+    stop_b.click(None,None,None, cancels=[sub_b,sub_e])
+iface.queue(default_concurrency_limit=10).launch()
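
For context, the sketch below shows the streaming text-to-speech pattern this commit switches to: a generator yields audio produced by PyPiper into a streaming gr.Audio component, and a Stop button cancels the in-flight event. The PyPiper calls (PyPiper(), load_mod(instr=...), stream_ttx(text)) are taken directly from the diff; the format of the chunks stream_ttx yields, and the stub text used in place of the Mixtral client, are assumptions for illustration only.

# Minimal sketch, not the app itself: generator-driven TTS streaming into gr.Audio.
# Assumes pypipertts is installed and that stream_ttx() yields audio chunks that a
# streaming gr.Audio component can play; the echo text stands in for the Mixtral client.
import gradio as gr
from pypipertts import PyPiper

pp = PyPiper()
pp.load_mod(instr="en_US-joe-medium")  # voice name as used in the commit

def speak(message, history):
    text = f"You said: {message}"      # placeholder for the model's streamed reply
    yield from pp.stream_ttx(text)     # hand each audio chunk to the Audio component

with gr.Blocks() as demo:
    aud = gr.Audio(streaming=True, autoplay=True)
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    sub = msg.submit(speak, [msg, chatbot], aud)
    gr.Button("Stop").click(None, None, None, cancels=[sub])

demo.queue(default_concurrency_limit=10).launch()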