import os

# Install OpenAI Whisper from GitHub at startup (Spaces runtime convenience)
os.system("pip install -q git+https://github.com/openai/whisper.git")
import gradio as gr
import os, json, re, uuid
from datetime import datetime
from pydub import AudioSegment
import whisper
import requests
import emoji
# Load keys and models
groq_key = os.getenv("GROQ_API_KEY")
whisper_model = whisper.load_model("base")
# Emoji filter for assistant replies
def filter_emojis(text):
    # Allow-list of emojis the bot may keep. The original set was partially lost to
    # an encoding error; the smiley entries below are assumed stand-ins, the rest recovered.
    allowed = {"😊", "😂", "👍", "🤖", "✨", "🎉", "💬", "🙌", "😍", "😢", "🧠", "✅"}
    return "".join(char if char not in emoji.EMOJI_DATA or char in allowed else "" for char in text)
# Chat function using Groq API
def chat_with_groq(message, history):
    messages = [{"role": "system", "content": "You are JAWERIA'SBOT 🤖 – cheerful, emoji-savvy, and sleek."}]
    messages += history + [{"role": "user", "content": message}]
    headers = {
        "Authorization": f"Bearer {groq_key}",
        "Content-Type": "application/json"
    }
    payload = {"model": "llama3-70b-8192", "messages": messages}
    res = requests.post("https://api.groq.com/openai/v1/chat/completions", headers=headers, json=payload)
    res.raise_for_status()
    reply = filter_emojis(res.json()["choices"][0]["message"]["content"])
    history += [{"role": "user", "content": message}, {"role": "assistant", "content": reply}]
    return "", history, history
# Transcribe audio from mic or file
def transcribe_audio(audio_path):
    if not audio_path or not os.path.exists(audio_path):
        return "⚠️ No audio available."
    try:
        # Convert to WAV so Whisper gets a consistent input format
        temp_wav = f"{uuid.uuid4()}.wav"
        AudioSegment.from_file(audio_path).export(temp_wav, format="wav")
        result = whisper_model.transcribe(temp_wav)
        os.remove(temp_wav)
        return result["text"]
    except Exception as e:
        return f"❌ Transcription error: {e}"
# Save/load chat sessions
def save_session(history):
    title = re.sub(r"[^\w\s]", "", next((m["content"] for m in history if m["role"] == "user"), "chat")).strip()
    title = " ".join(title.split()[:6])
    filename = f"{title} - {datetime.now().strftime('%b %d %Y %H-%M')}.json"
    with open(filename, "w", encoding="utf-8") as f:
        json.dump(history, f, indent=2, ensure_ascii=False)
    return f"✅ Saved `{filename[:-5]}`"
def list_saved_files():
    return sorted([f[:-5] for f in os.listdir() if f.endswith(".json")])

def load_chat(name):
    try:
        with open(f"{name}.json", "r", encoding="utf-8") as f:
            history = json.load(f)
        return history, history, f"✅ Loaded `{name}`"
    except Exception as e:
        return [], [], f"❌ Load error: {e}"
# Interface
with gr.Blocks(css="""
body { background: #111; color: white; font-family: 'Segoe UI'; }
textarea, input[type='text'] {
background-color: #222; color: white; border-radius: 30px;
padding: 10px 15px; border: 1px solid #444; height: 48px;
}
.gr-button {
background-color: #333 !important; color: white !important;
border: 1px solid #444; border-radius: 10px; font-weight: bold;
}
.gr-button:hover { background: #555 !important; }
.gr-dropdown { background: #222; color: white; max-height: 200px; overflow-y: auto; }
.gr-chatbot-message { background: #292929 !important; color: white; border-radius: 10px; }
""") as demo:
    state = gr.State([])

    with gr.Row():
        chat_input = gr.Textbox(placeholder="Type or speak...", label=None, scale=9)
        record_btn = gr.Button("🎙️ Record", scale=1)
        upload_btn = gr.File(file_types=[".mp3", ".wav", ".m4a"], label="Upload Audio File", visible=False)

    # type="messages" matches the dict-based history used by chat_with_groq
    chatbot = gr.Chatbot(label="JAWERIA'SBOT 🤖", type="messages", height=400)
    send_btn = gr.Button("Send 🚀")

    with gr.Row():
        new_btn = gr.Button("🆕 New")
        save_btn = gr.Button("💾 Save")
        dropdown = gr.Dropdown(label="📂 Load Saved", choices=list_saved_files(), interactive=True)
        load_btn = gr.Button("📥 Load")

    save_msg = gr.Markdown()
    load_msg = gr.Markdown()
    recording = gr.Audio(sources=["microphone"], type="filepath", visible=False)
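
    # --- Event wiring: recording, transcription, chat, and session save/load ---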
    def toggle_recording(is_recording):
        # Kept from the original layout; not currently bound to any event
        return not is_recording, gr.update(visible=not is_recording)

    def start_record(): return gr.update(visible=True)
    def stop_record(path): return transcribe_audio(path)

    record_state = gr.State(False)
    record_btn.click(start_record, outputs=recording).then(
        fn=lambda: True, outputs=record_state)
    recording.change(fn=stop_record, inputs=recording, outputs=chat_input)
    upload_btn.change(fn=transcribe_audio, inputs=upload_btn, outputs=chat_input)
    send_btn.click(chat_with_groq, inputs=[chat_input, state], outputs=[chat_input, chatbot, state])
    chat_input.submit(chat_with_groq, inputs=[chat_input, state], outputs=[chat_input, chatbot, state])
    new_btn.click(lambda: ("", [], []), outputs=[chat_input, chatbot, state])
    save_btn.click(save_session, inputs=[state], outputs=[save_msg])
    # Return gr.update(...) so the dropdown's choices (not its value) are refreshed
    save_btn.click(lambda: gr.update(choices=list_saved_files()), outputs=[dropdown])
    load_btn.click(load_chat, inputs=[dropdown], outputs=[chatbot, state, load_msg])
    demo.load(lambda: gr.update(choices=list_saved_files()), outputs=[dropdown])
gr.Markdown("<center><small>Made with π€ by Jaweria</small></center>")
demo.launch()