import gradio as gr
import os, json, re, uuid
from datetime import datetime
from pydub import AudioSegment
import whisper
import requests
# 🔐 Load API key securely from environment
groq_key = os.getenv("GROQ_API_KEY")
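# Optional guard (not in the original file): os.getenv returns None when the secret is not
# configured, and the chat request below would then fail with a 401. Warning early makes
# that easier to spot in the Space logs.
if not groq_key:
    print("⚠️ GROQ_API_KEY is not set - chat requests will fail until it is configured.")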
# 🧠 Load Whisper model
whisper_model = whisper.load_model("base")
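# Note: the "base" checkpoint is downloaded on first run; Whisper also ships "tiny",
# "small", "medium", and "large" variants if a different speed/accuracy trade-off is needed.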
# 💬 Chat function
def chat_with_groq(message, history):
    messages = [{"role": "system", "content": "You are JAWERIA'SBOT 🤖 - cheerful, emoji-savvy, and sleek."}]
    messages += history + [{"role": "user", "content": message}]
    headers = {
        "Authorization": f"Bearer {groq_key}",
        "Content-Type": "application/json"
    }
    payload = {
        "model": "llama3-70b-8192",
        "messages": messages
    }
    response = requests.post("https://api.groq.com/openai/v1/chat/completions", headers=headers, json=payload)
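    # Not in the original code: fail fast on HTTP errors (bad key, rate limit, etc.)
    # rather than hitting a less obvious KeyError on the missing "choices" field below.
    response.raise_for_status()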
    reply = response.json()["choices"][0]["message"]["content"]
    history += [{"role": "user", "content": message}, {"role": "assistant", "content": reply}]
    return "", history, history
# 🎙️ Audio transcription
def transcribe_audio(audio_path):
    if audio_path is None or not os.path.exists(audio_path):
        return "⚠️ No audio recorded."
    try:
        temp_wav = f"{uuid.uuid4()}.wav"
        AudioSegment.from_file(audio_path).export(temp_wav, format="wav")
        result = whisper_model.transcribe(temp_wav)
        os.remove(temp_wav)
        return result["text"]
    except Exception as e:
        return f"❌ Transcription error: {e}"
# 💾 Save/load
def save_session(history):
    prompt = next((m["content"] for m in history if m["role"] == "user"), "chat")
    title = re.sub(r"[^\w\s]", "", prompt).strip()
    title = " ".join(title.split()[:6])
    timestamp = datetime.now().strftime("%b %d %Y %H-%M")
    filename = f"{title} - {timestamp}.json"
    with open(filename, "w", encoding="utf-8") as f:
        json.dump(history, f, indent=2, ensure_ascii=False)
    return f"✅ Saved `{filename[:-5]}`"
def list_saved_files():
    return sorted([f[:-5] for f in os.listdir() if f.endswith(".json")])
def load_chat(name):
    filename = f"{name}.json"
    try:
        with open(filename, "r", encoding="utf-8") as f:
            history = json.load(f)
        return history, history, f"✅ Loaded `{name}`"
    except Exception as e:
        return [], [], f"❌ Load error: {e}"
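# Note (not in the original file): sessions are written to the current working directory,
# which on a typical Space is ephemeral storage - saved chats are lost on restart unless
# persistent storage is attached.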
# 🌐 Gradio UI
with gr.Blocks(css="""
body {
    background-color: #111 !important;
    color: white !important;
}
* {
    color: white !important;
}
h1, h2, h3, label, .gr-label, .gr-markdown {
    color: white !important;
    text-align: center !important;
}
/* Voice box, input, buttons, dropdowns, etc. */
.gr-chatbot-message, .gr-textbox, textarea, input[type='text'], input, select,
.gr-dropdown, .gr-markdown, .gr-label, .gr-button, .gr-audio, .gr-audio-label,
.gr-row, .gr-column, .form, .form-wrap {
    background-color: #000 !important;
    color: white !important;
    border-radius: 8px;
    border: 1px solid #333 !important;
}
/* Hover effect for buttons */
.gr-button:hover {
    background-color: #222 !important;
}
/* Voice box tweaks */
audio, .gr-audio-box {
    background-color: #000 !important;
    color: white !important;
}
/* Make dropdown scrollable */
.gr-dropdown {
    max-height: 150px;
    overflow-y: auto;
}
""") as demo:
    # Interface elements
    state = gr.State([])
    gr.Markdown("# ✨ JAWERIA'SBOT 🤖")
    gr.Markdown("<div style='text-align:center;'>Speak or type - your assistant listens and replies with text 💬</div>")
    chatbot = gr.Chatbot(type="messages", height=350)
    chat_input = gr.Textbox(label="💬 Message", placeholder="Type or speak your message...")
    send_btn = gr.Button("Send 🚀")
    with gr.Row():
        voice_input = gr.Audio(label="🎤 Speak", type="filepath", interactive=True)
        voice_btn = gr.Button("🎙️ Transcribe to Text")
    with gr.Row():
        new_chat_btn = gr.Button("🆕 New")
        save_btn = gr.Button("💾 Save")
        saved_dropdown = gr.Dropdown(label="📂 Load Saved", choices=list_saved_files())
        load_btn = gr.Button("📥 Load")
    save_msg = gr.Markdown()
    load_msg = gr.Markdown()
    # 🔗 Bind events
    send_btn.click(chat_with_groq, inputs=[chat_input, state], outputs=[chat_input, chatbot, state])
    chat_input.submit(chat_with_groq, inputs=[chat_input, state], outputs=[chat_input, chatbot, state])
    voice_btn.click(transcribe_audio, inputs=[voice_input], outputs=[chat_input])
    new_chat_btn.click(fn=lambda: ("", [], []), outputs=[chat_input, chatbot, state])
    save_btn.click(fn=save_session, inputs=[state], outputs=[save_msg])
    # Refresh the dropdown after saving; gr.update targets its choices rather than its value
    save_btn.click(fn=lambda: gr.update(choices=list_saved_files()), outputs=[saved_dropdown])
    load_btn.click(fn=load_chat, inputs=[saved_dropdown], outputs=[chatbot, state, load_msg])
    demo.load(fn=lambda: gr.update(choices=list_saved_files()), outputs=[saved_dropdown])
demo.launch()