import gradio as gr
import os, json, re, uuid
from datetime import datetime
from pydub import AudioSegment
import whisper
import requests
import emoji
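# Groq API key comes from the environment; the Whisper "base" model is loaded once at startup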
groq_key = os.getenv("GROQ_API_KEY")
whisper_model = whisper.load_model("base")
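# Keep normal text but drop any emoji that is not in the small whitelist below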
def filter_emojis(text):
    allowed_emojis = {"😊", "😂", "🎉", "🤖", "✨", "🚀", "💬", "👍", "😎", "😢", "🧠", "✅"}
    return "".join(char if char not in emoji.EMOJI_DATA or char in allowed_emojis else "" for char in text)
def chat_with_groq(message, history):
    # Build the message list: system persona, prior turns, then the new user message
    messages = [{"role": "system", "content": "You are JAWERIA'SBOT 🤖 - cheerful, emoji-savvy, and sleek."}]
    messages += history + [{"role": "user", "content": message}]
    headers = {
        "Authorization": f"Bearer {groq_key}",
        "Content-Type": "application/json"
    }
    payload = {
        "model": "llama3-70b-8192",
        "messages": messages
    }
    response = requests.post("https://api.groq.com/openai/v1/chat/completions", headers=headers, json=payload)
    if response.status_code != 200:
        # Surface the error in the chat window instead of writing a string into the state
        error = f"❌ API Error {response.status_code}: {response.text}"
        history = history + [{"role": "user", "content": message}, {"role": "assistant", "content": error}]
        return "", history, history
    raw_reply = response.json()["choices"][0]["message"]["content"]
    reply = filter_emojis(raw_reply)
    history += [{"role": "user", "content": message}, {"role": "assistant", "content": reply}]
    return "", history, history
def transcribe_audio(audio_path):
    if audio_path is None or not os.path.exists(audio_path):
        return "⚠️ No audio recorded."
    try:
        temp_wav = f"{uuid.uuid4()}.wav"
        AudioSegment.from_file(audio_path).export(temp_wav, format="wav")
        result = whisper_model.transcribe(temp_wav)
        os.remove(temp_wav)
        return result["text"]
    except Exception as e:
        return f"❌ Transcription error: {e}"
def save_session(history):
    prompt = next((m["content"] for m in history if m["role"] == "user"), "chat")
    title = re.sub(r"[^\w\s]", "", prompt).strip()
    title = " ".join(title.split()[:6])
    timestamp = datetime.now().strftime("%b %d %Y %H-%M")
    filename = f"{title} - {timestamp}.json"
    with open(filename, "w", encoding="utf-8") as f:
        json.dump(history, f, indent=2, ensure_ascii=False)
    return f"✅ Saved `{filename[:-5]}`"
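# Saved sessions are the .json files in the working directory, listed without their extension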
def list_saved_files():
    return sorted([f[:-5] for f in os.listdir() if f.endswith(".json")])
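# Reload a previously saved session into the chatbot and the state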
def load_chat(name):
    filename = f"{name}.json"
    try:
        with open(filename, "r", encoding="utf-8") as f:
            history = json.load(f)
        return history, history, f"✅ Loaded `{name}`"
    except Exception as e:
        return [], [], f"❌ Load error: {e}"
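# Dark-themed Gradio UI: chat window, text/voice input, and save/load controls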
with gr.Blocks(css="""
body { background-color: #111; font-family: 'Segoe UI', sans-serif; }
.gr-chatbot { border: 1px solid #333; background-color: #1e1e1e; border-radius: 10px; }
.gr-chatbot-message { background-color: #292929; color: #f1f1f1; border-radius: 10px; margin-bottom: 5px; }
textarea, input[type='text'] {
background-color: #222; color: #fff; border-radius: 30px !important;
border: 1px solid #444; padding: 10px 50px 10px 15px; height: 48px;
}
.gr-button {
background-color: #333 !important; color: #fff !important;
border: 1px solid #444 !important; border-radius: 8px;
font-weight: bold; transition: background-color 0.2s;
}
.gr-button:hover { background-color: #555 !important; cursor: pointer; }
.gr-dropdown { background-color: #222; color: #fff; max-height: 200px; overflow-y: auto; }
h1, h2, h3, .gr-markdown { color: #f1f1f1; text-align: center; }
input[type="file"] { color: white; }
#mic-dropdown { position: absolute; right: 20px; top: 8px; }
""") as demo:
    state = gr.State([])
    audio_file = gr.File(visible=False)
    gr.Markdown("# ✨ JAWERIA'SBOT 🤖")
    gr.Markdown("Speak or type - your assistant listens and replies with text and emojis 💬")
    chatbot = gr.Chatbot(type="messages", height=400)
    with gr.Row():
        chat_input = gr.Textbox(label="💬", placeholder="Type your message...", scale=9)
        mic_menu = gr.Dropdown(choices=["🎙️ Record Audio", "📁 Upload File"], label="", value=None, scale=1, interactive=True)
    send_btn = gr.Button("Send 🚀")
    with gr.Row():
        new_chat_btn = gr.Button("🆕 New")
        save_btn = gr.Button("💾 Save")
        saved_dropdown = gr.Dropdown(label="📂 Load Saved", choices=list_saved_files(), interactive=True)
        load_btn = gr.Button("📥 Load")
    save_msg = gr.Markdown()
    load_msg = gr.Markdown()
    # Event handlers
    send_btn.click(chat_with_groq, inputs=[chat_input, state], outputs=[chat_input, chatbot, state])
    chat_input.submit(chat_with_groq, inputs=[chat_input, state], outputs=[chat_input, chatbot, state])

    def handle_mic_choice(choice):
        # Swap the hidden audio component to a recorder or an uploader based on the dropdown choice
        if choice == "🎙️ Record Audio":
            return gr.Audio(sources=["microphone"], type="filepath", label="Recording... 🎙️", interactive=True, visible=True)
        elif choice == "📁 Upload File":
            return gr.Audio(sources=["upload"], type="filepath", label="Upload Audio 📁", interactive=True, visible=True)
        else:
            return gr.update(visible=False)

    mic_audio = gr.Audio(visible=False, type="filepath")
    mic_menu.change(handle_mic_choice, inputs=mic_menu, outputs=mic_audio)
    mic_audio.change(transcribe_audio, inputs=mic_audio, outputs=chat_input)

    new_chat_btn.click(fn=lambda: ("", [], []), outputs=[chat_input, chatbot, state])
    save_btn.click(fn=save_session, inputs=[state], outputs=[save_msg])

    def refresh_saved_dropdown():
        # Update the dropdown's choices (returning a bare list would only set its value)
        return gr.update(choices=list_saved_files())

    save_btn.click(fn=refresh_saved_dropdown, outputs=[saved_dropdown])
    load_btn.click(fn=load_chat, inputs=[saved_dropdown], outputs=[chatbot, state, load_msg])
    demo.load(fn=refresh_saved_dropdown, outputs=[saved_dropdown])

demo.launch()