Spaces:
Running
Running
File size: 5,252 Bytes
279522e f33a336 573da01 279522e 299800e f33a336 573da01 f33a336 4efd4d3 573da01 f33a336 279522e f33a336 279522e f33a336 279522e 4efd4d3 279522e f33a336 299800e 573da01 f33a336 573da01 f33a336 279522e f33a336 573da01 279522e f33a336 279522e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 |
import gradio as gr
import os, json, re, uuid
from datetime import datetime
from pydub import AudioSegment
import whisper
import requests
import emoji
# Groq API key is read from the environment; requests fail with 401 if unset.
groq_key = os.getenv("GROQ_API_KEY")
# Load the Whisper "base" speech-to-text model once at startup (downloads on first run).
whisper_model = whisper.load_model("base")
def filter_emojis(text):
    """Remove every emoji from *text* except a small whitelist.

    Non-emoji characters pass through unchanged; emoji characters are kept
    only if they appear in ``allowed_emojis``.

    NOTE(review): the original whitelist literal was mojibake-garbled (and
    split across lines) in this copy of the file. The 12-entry set below is
    a best-effort reconstruction matching the bot's theme — confirm against
    the original source.
    """
    allowed_emojis = {"😊", "😂", "😎", "🤖", "✨", "🎉", "💬", "🙌", "👍", "😢", "🧠", "✅"}
    return "".join(
        char
        for char in text
        if char not in emoji.EMOJI_DATA or char in allowed_emojis
    )
def chat_with_groq(message, history):
    """Send *message* plus prior *history* to the Groq chat-completions API.

    Returns ``("", new_history, new_history)`` so the Gradio wiring can clear
    the textbox and update both the Chatbot and the State in one shot.

    Fixes vs. original:
    - the error path used to return the error *string* in the state slot,
      replacing the ``gr.State`` list with a str and breaking the next turn;
      errors are now surfaced as an assistant message instead.
    - a request timeout is set so a hung API call cannot freeze the UI forever.
    - history is no longer mutated in place (``+=``); a new list is built.
    """
    messages = [{"role": "system", "content": "You are JAWERIA'SBOT 🤖 — cheerful, emoji-savvy, and sleek."}]
    messages += history + [{"role": "user", "content": message}]
    headers = {
        "Authorization": f"Bearer {groq_key}",
        "Content-Type": "application/json",
    }
    payload = {
        "model": "llama3-70b-8192",
        "messages": messages,
    }
    try:
        response = requests.post(
            "https://api.groq.com/openai/v1/chat/completions",
            headers=headers,
            json=payload,
            timeout=60,
        )
    except requests.RequestException as e:
        reply = f"❌ Network error: {e}"
    else:
        if response.status_code != 200:
            reply = f"❌ API Error {response.status_code}: {response.text}"
        else:
            raw_reply = response.json()["choices"][0]["message"]["content"]
            reply = filter_emojis(raw_reply)
    # Build a fresh list rather than mutating the shared state object.
    history = history + [
        {"role": "user", "content": message},
        {"role": "assistant", "content": reply},
    ]
    return "", history, history
def transcribe_audio(audio_path):
    """Transcribe a recorded audio file to text with Whisper.

    The input (any format pydub can read) is first converted to a temporary
    WAV file, which Whisper then transcribes. Returns the transcript string,
    or a user-facing error message on failure.
    """
    if audio_path is None or not os.path.exists(audio_path):
        return "⚠️ No audio recorded."
    temp_wav = f"{uuid.uuid4()}.wav"
    try:
        AudioSegment.from_file(audio_path).export(temp_wav, format="wav")
        result = whisper_model.transcribe(temp_wav)
        return result["text"]
    except Exception as e:
        return f"❌ Transcription error: {e}"
    finally:
        # Always remove the temp file — the original leaked it whenever
        # transcription raised before reaching os.remove().
        if os.path.exists(temp_wav):
            os.remove(temp_wav)
def save_session(history):
    """Save the chat history as pretty-printed JSON in the working directory.

    The filename is "<first 6 words of the first user prompt> - <timestamp>.json".
    Returns a user-facing confirmation message with the saved base name.
    """
    prompt = next((m["content"] for m in history if m["role"] == "user"), "chat")
    title = re.sub(r"[^\w\s]", "", prompt).strip()
    # Fall back to "chat" when the prompt was all punctuation/emoji, so the
    # filename never degenerates to " - <timestamp>.json".
    title = " ".join(title.split()[:6]) or "chat"
    timestamp = datetime.now().strftime("%b %d %Y %H-%M")
    filename = f"{title} - {timestamp}.json"
    with open(filename, "w", encoding="utf-8") as f:
        json.dump(history, f, indent=2, ensure_ascii=False)
    return f"✅ Saved `{filename[:-5]}`"
def list_saved_files():
    """Return the base names (no .json suffix) of saved sessions, sorted."""
    names = [entry[:-5] for entry in os.listdir() if entry.endswith(".json")]
    names.sort()
    return names
def load_chat(name):
    """Load a saved session by base name (without the .json suffix).

    Returns ``(history, history, status_msg)`` — the history twice so the
    caller can feed both the Chatbot and the State, plus a user-facing
    status message. On any failure, two empty lists and an error message.
    """
    filename = f"{name}.json"
    try:
        with open(filename, "r", encoding="utf-8") as f:
            history = json.load(f)
        return history, history, f"✅ Loaded `{name}`"
    except Exception as e:
        return [], [], f"❌ Load error: {e}"
# --- Gradio UI -------------------------------------------------------------
# Dark-themed chat interface: typed + voice input, session save/load.
# NOTE(review): every emoji in the labels/markdown below was mojibake-garbled
# in this copy of the file; the glyphs used here are a best-effort
# reconstruction — confirm against the original source. Wiring is unchanged.
with gr.Blocks(css="""
body { background-color: #111 !important; font-family: 'Segoe UI', sans-serif !important; }
.gr-chatbot { border-radius: 10px; border: 1px solid #333; background-color: #1e1e1e !important; }
.gr-chatbot-message { background-color: #292929 !important; color: #f1f1f1 !important; border-radius: 10px; margin-bottom: 5px; }
textarea, input[type='text'] { background-color: #222 !important; color: #fff !important; border-radius: 8px !important; border: 1px solid #444 !important; padding: 10px; }
.gr-button { background-color: #333 !important; color: #fff !important; border: 1px solid #444 !important; border-radius: 8px; padding: 8px 14px; font-weight: bold; transition: background-color 0.2s; }
.gr-button:hover { background-color: #555 !important; cursor: pointer; }
.gr-dropdown { background-color: #222 !important; color: #fff !important; border-radius: 8px; border: 1px solid #444; padding: 8px; }
h1, h2, h3, .gr-markdown { color: #f1f1f1 !important; text-align: center; }
audio { background-color: #000 !important; border: 1px solid #333 !important; }
""") as demo:
    # Conversation history shared by all handlers (list of role/content dicts).
    state = gr.State([])
    gr.Markdown("# ✨ JAWERIA'SBOT 🤖")
    gr.Markdown("Speak or type — your assistant listens and replies with text and emojis 💬")
    chatbot = gr.Chatbot(type="messages", height=400)
    chat_input = gr.Textbox(label="💬 Message", placeholder="Type or speak your message...")
    send_btn = gr.Button("Send 🚀")
    with gr.Row():
        voice_input = gr.Audio(label="🎤 Speak", type="filepath", interactive=True)
        voice_btn = gr.Button("🎙️ Transcribe to Text")
    with gr.Row():
        new_chat_btn = gr.Button("🆕 New")
        save_btn = gr.Button("💾 Save")
        saved_dropdown = gr.Dropdown(label="📁 Load Saved", choices=list_saved_files())
        load_btn = gr.Button("📥 Load")
    save_msg = gr.Markdown()
    load_msg = gr.Markdown()
    # Send on button click or Enter; both clear the textbox and update chat+state.
    send_btn.click(chat_with_groq, inputs=[chat_input, state], outputs=[chat_input, chatbot, state])
    chat_input.submit(chat_with_groq, inputs=[chat_input, state], outputs=[chat_input, chatbot, state])
    # Transcription result is placed into the textbox for the user to review/send.
    voice_btn.click(transcribe_audio, inputs=[voice_input], outputs=[chat_input])
    new_chat_btn.click(fn=lambda: ("", [], []), outputs=[chat_input, chatbot, state])
    save_btn.click(fn=save_session, inputs=[state], outputs=[save_msg])
    # Second handler refreshes the dropdown so the new file is selectable immediately.
    save_btn.click(fn=list_saved_files, outputs=[saved_dropdown])
    load_btn.click(fn=load_chat, inputs=[saved_dropdown], outputs=[chatbot, state, load_msg])
    # Repopulate saved-session choices on every page load.
    demo.load(fn=list_saved_files, outputs=[saved_dropdown])
demo.launch()
|