from huggingface_hub import InferenceClient
import gradio as gr
import random
import prompts
from pypipertts import PyPiper

pp = PyPiper()

#client = InferenceClient("Qwen/QwQ-32B-Preview")
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
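
# format_prompt builds a Mixtral-style instruction string: each prior
# (user, assistant) turn is wrapped as "[INST] ... [/INST] response</s>",
# and the new message is appended as a final "[INST] ... [/INST]".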
def format_prompt(message, history):
    prompt = ""
    if history:
        prompt = "<s>"
        for user_prompt, bot_response in history:
            #print (bot_response)
            prompt += f"[INST] {user_prompt} [/INST]"
            prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
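
# generate streams tokens from the Inference API and yields partial
# (prompt, output) pairs so the Chatbot updates live; the hidden textbox
# receives the finished reply only once the stream ends.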
def generate(prompt, history):
    if not history:
        history = []
    seed = random.randint(1, 9999999999999)
    print(seed)
    system_prompt = prompts.ASSISTANT
    generate_kwargs = dict(
        temperature=0.9,
        max_new_tokens=512,
        top_p=0.95,
        repetition_penalty=1.0,
        do_sample=True,
        seed=seed,
    )
    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""
    buf = ""
    #out = history.append((prompt,""))
    for response in stream:
        output += response.token.text
        yield [(prompt, output)], ""
    yield [(prompt, output)], output
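
# tts removes the model's end-of-sequence marker before handing the text to
# Piper, then relays the streamed audio chunks.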
def tts(inp, voice, length, noise, width, sen_pause):
    #print(inp)
    #print(type(inp))
    # str.strip("</s>") would trim any of the characters <, /, s, > from the
    # ends; use replace() to drop the literal "</s>" token instead.
    inp = inp.replace("</s>", "")
    yield from pp.stream_tts(inp, voice, length, noise, width, sen_pause)
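
# load_mod loads the selected Piper voice model and reports progress to the
# HTML status element.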
def load_mod(model):
    yield f"Loading: {model}"
    pp.load_mod(model)
    yield f"Voice Loaded: {model}"
with gr.Blocks() as iface:
    aud = gr.Audio(streaming=True, autoplay=True)
    #chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
    chatbot = gr.Chatbot()
    prompt = gr.Textbox()
    with gr.Group():
        with gr.Row():
            submit_b = gr.Button()
            stop_b = gr.Button("Stop")
            clear = gr.ClearButton([chatbot])
    with gr.Accordion("Voice Controls", open=False):
        msg = gr.HTML("""""")
        names = gr.Dropdown(label="Voice", choices=pp.key_list, value="en_US-joe-medium")
        length = gr.Slider(label="Length", minimum=0.01, maximum=10.0, value=1)
        noise = gr.Slider(label="Noise", minimum=0.01, maximum=3.0, value=0.5)
        width = gr.Slider(label="Noise Width", minimum=0.01, maximum=3.0, value=0.5)
        sen_pause = gr.Slider(label="Sentence Pause", minimum=0.1, maximum=10.0, value=1)
        upd_btn = gr.Button("Update")
    with gr.Row(visible=False):
        stt = gr.Textbox(visible=True)
    iface.load(load_mod, names, msg)
    names.change(load_mod, names, msg)
    sub_b = submit_b.click(generate, [prompt, chatbot], [chatbot, stt])
    #sub_e = prompt.submit(generate, [prompt, chatbot], [chatbot,stt])
    # Route the finished reply through the tts() wrapper (rather than calling
    # pp.stream_tts directly) so the "</s>" marker is removed before synthesis.
    stt.change(tts, [stt, names, length, noise, width, sen_pause], aud)
    stop_b.click(None, None, None, cancels=[sub_b])

iface.queue(default_concurrency_limit=10).launch()