import gradio as gr
import os, json, re, uuid
from datetime import datetime
from pydub import AudioSegment
import whisper
import requests
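# Assumed setup (packages are not pinned in this Space): pip install gradio pydub openai-whisper requests
# Both pydub and Whisper shell out to ffmpeg, so it must be on the PATH.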
# 🔑 API key
groq_key = os.environ.get("GROQ_API_KEY", "")  # Read from the environment; never commit a real key
# 🧠 Load Whisper locally
whisper_model = whisper.load_model("base")  # Use "small"/"medium"/"large" for better results
# 💬 Chat using Groq
def chat_with_groq(message, history):
    messages = [{"role": "system", "content": "You are JAWERIA'SBOT 🤖 - cheerful, emoji-savvy, and sleek."}]
    messages += history + [{"role": "user", "content": message}]
    headers = {
        "Authorization": f"Bearer {groq_key}",
        "Content-Type": "application/json"
    }
    payload = {
        "model": "llama3-70b-8192",
        "messages": messages
    }
    response = requests.post("https://api.groq.com/openai/v1/chat/completions", headers=headers, json=payload)
    response.raise_for_status()  # Fail loudly on HTTP errors instead of KeyError-ing on "choices"
    reply = response.json()["choices"][0]["message"]["content"]
    history += [{"role": "user", "content": message}, {"role": "assistant", "content": reply}]
    return "", history, history
# 🎙️ Transcribe voice to text
def transcribe_audio(audio_path):
    if audio_path is None or not os.path.exists(audio_path):
        return "⚠️ No audio recorded."
    try:
        # Normalize whatever Gradio recorded to WAV so Whisper gets a clean input
        temp_wav = f"{uuid.uuid4()}.wav"
        AudioSegment.from_file(audio_path).export(temp_wav, format="wav")
        result = whisper_model.transcribe(temp_wav)
        os.remove(temp_wav)
        return result["text"]
    except Exception as e:
        return f"❌ Transcription error: {e}"
# 💾 Save/load
def save_session(history):
    # Name the file after the first user prompt, trimmed to six words
    prompt = next((m["content"] for m in history if m["role"] == "user"), "chat")
    title = re.sub(r"[^\w\s]", "", prompt).strip()
    title = " ".join(title.split()[:6])
    timestamp = datetime.now().strftime("%b %d %Y %H-%M")
    filename = f"{title} - {timestamp}.json"
    with open(filename, "w", encoding="utf-8") as f:
        json.dump(history, f, indent=2, ensure_ascii=False)
    return f"✅ Saved `{filename[:-5]}`"
def list_saved_files():
    # Saved sessions live in the working directory; strip ".json" for display
    return sorted(f[:-5] for f in os.listdir() if f.endswith(".json"))
def load_chat(name):
    filename = f"{name}.json"
    try:
        with open(filename, "r", encoding="utf-8") as f:
            history = json.load(f)
        return history, history, f"✅ Loaded `{name}`"
    except Exception as e:
        return [], [], f"❌ Load error: {e}"
# 🌐 Gradio UI
with gr.Blocks(css="""
body {background-color: #111 !important; color: white;}
.gr-chatbot-message {background-color: #222 !important; color: white !important; border-radius: 6px;}
input[type='text'], textarea, select, .gr-textbox {background-color: #222 !important; color: white !important;}
.gr-button {background-color: #007acc !important; color: white !important;}
""") as demo:
    state = gr.State([])
    gr.Markdown("<h1 style='text-align:center; color:#00ccff;'>✨ JAWERIA'SBOT 🤖</h1>")
    gr.Markdown("<div style='text-align:center;'>Speak or type - your assistant listens and replies with text 💬</div>")
    chatbot = gr.Chatbot(type="messages", height=350)
    chat_input = gr.Textbox(label="💬 Message", placeholder="Type or speak your message...")
    send_btn = gr.Button("Send 🚀")
    with gr.Row():
        voice_input = gr.Audio(label="🎤 Speak", type="filepath", interactive=True)
        voice_btn = gr.Button("🎙️ Transcribe to Text")
    with gr.Row():
        new_chat_btn = gr.Button("🆕 New")
        save_btn = gr.Button("💾 Save")
        saved_dropdown = gr.Dropdown(label="📂 Load Saved", choices=list_saved_files())
        load_btn = gr.Button("📥 Load")
    save_msg = gr.Markdown()
    load_msg = gr.Markdown()
    # 🔗 Bind actions
    send_btn.click(chat_with_groq, inputs=[chat_input, state], outputs=[chat_input, chatbot, state])
    chat_input.submit(chat_with_groq, inputs=[chat_input, state], outputs=[chat_input, chatbot, state])
    voice_btn.click(transcribe_audio, inputs=[voice_input], outputs=[chat_input])
    new_chat_btn.click(fn=lambda: ("", [], []), outputs=[chat_input, chatbot, state])
    save_btn.click(fn=save_session, inputs=[state], outputs=[save_msg])
    # Return gr.update(...) so the dropdown's *choices* refresh, not just its value
    save_btn.click(fn=lambda: gr.update(choices=list_saved_files()), outputs=[saved_dropdown])
    load_btn.click(fn=load_chat, inputs=[saved_dropdown], outputs=[chatbot, state, load_msg])
    demo.load(fn=lambda: gr.update(choices=list_saved_files()), outputs=[saved_dropdown])
demo.launch()
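# To run locally (assumed workflow): export GROQ_API_KEY=<your key>, then `python app.py`.
# On Hugging Face Spaces, set GROQ_API_KEY as a Space secret; it is exposed as an env var.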