# JARVIS / app.py
# (Hugging Face Space by seawolf2357 — commit db675d1; header converted to
# comments so the module is valid Python)
import os
import re
import gradio as gr
import asyncio
import time
import tempfile
from huggingface_hub import InferenceClient
from gtts import gTTS
import speech_recognition as sr
# ...
async def generate_audio(prompt):
    """Capture speech from the microphone, run it through the LLM, and speak the reply.

    NOTE(review): the ``prompt`` argument is ignored — the LLM input comes
    from speech recognition instead. The parameter is kept so the Gradio
    ``click`` binding (which passes the textbox value) keeps working.

    Returns:
        str: Path to a temporary ``.mp3`` file containing the synthesized
        reply, or the literal error message ``"Could not understand audio"``
        when speech recognition fails (matching the original behavior).
    """
    # --- Speech recognition: capture one utterance from the default mic ---
    recognizer = sr.Recognizer()
    with sr.Microphone() as source:
        print("Speak:")
        audio = recognizer.listen(source)
    try:
        text = recognizer.recognize_google(audio)
    except (sr.UnknownValueError, sr.RequestError):
        # Narrowed from a bare ``except:`` — an unintelligible utterance or a
        # Google API failure is expected; anything else should surface as a bug.
        return "Could not understand audio"

    # --- Feed the transcript to the LLM (streaming generation) ---
    generate_kwargs = dict(
        temperature=0.6,
        max_new_tokens=256,
        top_p=0.95,
        repetition_penalty=1.0,  # 1.0 == no penalty; float for clarity
        do_sample=True,
        seed=42,  # fixed seed → reproducible sampling
    )
    # NOTE(review): ``system_instructions1`` and ``client1`` are defined
    # elsewhere in this file (not visible in this chunk).
    formatted_prompt = system_instructions1 + text + "[JARVIS]"
    stream = client1.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True,
        return_full_text=True)
    # ``"".join`` over the stream avoids quadratic ``+=`` concatenation.
    output = "".join(response.token.text for response in stream)

    # --- Text-to-speech: write the reply to a temp MP3 and return its path ---
    tts = gTTS(output, lang="ko")
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
        tmp_path = tmp_file.name
    # Save after the handle is closed (delete=False keeps the file around);
    # saving into an open handle's path can fail on Windows.
    tts.save(tmp_path)
    return tmp_path
# ...
# Gradio UI: prompt/input textboxes, an auto-playing audio player, and one
# button wired to two handlers. NOTE(review): ``generate1`` and ``MORE`` are
# defined elsewhere in this file (not visible in this chunk).
with gr.Blocks(css="style.css") as demo:
    with gr.Row():
        user_input = gr.Textbox(label="Prompt", value="What is Wikipedia")
        input_text = gr.Textbox(label="Input Text", elem_id="important")
        output_audio = gr.Audio(label="JARVIS", type="filepath",
                                interactive=False,
                                autoplay=True,  # play the reply as soon as it arrives
                                elem_classes="audio")
        translate_btn = gr.Button("Response")
    with gr.Row():
        # NOTE(review): both handlers are bound to the SAME button and write
        # to the SAME output component — one click fires both, and whichever
        # finishes last overwrites the audio output. Confirm this is intended
        # rather than a leftover from swapping handlers.
        translate_btn.click(fn=generate1, inputs=user_input,
                            outputs=output_audio, api_name="translate")
        translate_btn.click(fn=generate_audio, inputs=user_input,
                            outputs=output_audio, api_name="generate_audio")
    with gr.Row():
        gr.Markdown(MORE)

if __name__ == "__main__":
    # Queue up to 200 pending requests before rejecting new ones.
    demo.queue(max_size=200).launch()