# Neobot — Gradio chat app (Groq LLM + Whisper speech-to-text)
import os
import gradio as gr
from openai import OpenAI
import tempfile
import torch
import whisper
# Load API key from environment (None if unset — API calls will then fail auth)
GROQ_API_KEY = os.environ.get("GROQ_API_KEY")

# Initialize client for Groq's OpenAI-compatible API endpoint
client = OpenAI(
    api_key=GROQ_API_KEY,
    base_url="https://api.groq.com/openai/v1"
)

# Load Whisper "base" speech-to-text model
whisper_model = whisper.load_model("base")

# Chat history storage (module-level; per-session history also flows through gr.State)
chat_history = []
def chat_with_bot(message, history):
    """Send *message* plus prior *history* to the Groq LLM and return the updated conversation.

    Parameters
    ----------
    message : str
        The user's new chat message.
    history : list[dict] | None
        Prior messages as ``{"role": ..., "content": ...}`` dicts
        (Gradio "messages" format); None/empty on a fresh session.

    Returns
    -------
    tuple[list, list]
        The updated message list twice — once for the Chatbot display,
        once for the session State.
    """
    # Work on a local copy instead of mutating a module-level global:
    # the session history already arrives via Gradio's State, and a shared
    # global would leak messages across concurrent users/sessions.
    messages = list(history or [])
    messages.append({"role": "user", "content": message})

    # Ask the model for a completion over the full conversation so far
    response = client.chat.completions.create(
        model="llama3-8b-8192",
        messages=messages
    )
    reply = response.choices[0].message.content

    messages.append({"role": "assistant", "content": reply})
    return messages, messages
def transcribe_audio(audio_file):
    """Transcribe an uploaded audio file to text with Whisper.

    Parameters
    ----------
    audio_file : str | None
        Filesystem path to the recording (Gradio ``type="filepath"``),
        or None when nothing was uploaded.

    Returns
    -------
    str
        The transcribed text, or "" when no file was provided.
    """
    if audio_file is None:
        return ""
    # model.transcribe() handles loading, resampling, and windowing
    # internally, so recordings longer than 30 s are transcribed in full —
    # the manual load_audio/pad_or_trim/decode pipeline silently truncated
    # everything to the first 30-second window.
    result = whisper_model.transcribe(
        audio_file,
        fp16=torch.cuda.is_available(),  # use fp16 decoding only when a GPU is present
    )
    return result["text"].strip()
# --- Gradio UI: chat window, text input, and audio-transcription controls ---
with gr.Blocks() as demo:
    gr.Markdown("# 🤖 Neobot - Chat with Voice, File & Text")
    # Chat display expects OpenAI-style {"role", "content"} message dicts
    chatbot = gr.Chatbot(type="messages", label="Chat")
    # Per-session conversation history, threaded through chat_with_bot
    state = gr.State([])
    with gr.Row():
        txt = gr.Textbox(placeholder="Type a message or upload audio/file...", show_label=False)
        send_btn = gr.Button("Send")
    with gr.Row():
        audio_upload = gr.Audio(type="filepath", label="Upload Audio")  # Removed `source=`
        transcribe_btn = gr.Button("Transcribe Audio")
    # Wiring: Send -> LLM round-trip; Transcribe -> fill the textbox with speech-to-text
    send_btn.click(chat_with_bot, inputs=[txt, state], outputs=[chatbot, state])
    transcribe_btn.click(transcribe_audio, inputs=audio_upload, outputs=txt)

demo.launch()