general_chatbot / app.py
JaweriaGenAI's picture
Update app.py
ec2b48a verified
raw
history blame
5.59 kB
import os
os.system("pip install -q openai-whisper")
import gradio as gr
import json, re, uuid
from datetime import datetime
from pydub import AudioSegment
import whisper
import requests
import emoji
# Load Groq API key and Whisper model.
# NOTE: whisper.load_model runs at import time — first startup downloads the
# "base" model weights and can take a while.
groq_key = os.getenv("GROQ_API_KEY")  # None if unset; Groq API calls will then fail auth
whisper_model = whisper.load_model("base")
# Emoji filtering: whitelist of emoji allowed to pass through filter_emojis();
# every other character present in emoji.EMOJI_DATA is stripped from replies.
allowed_emojis = {"😊", "πŸ˜„", "πŸ‘", "πŸ€–", "✨", "πŸŽ‰", "πŸ’¬", "πŸ™Œ", "😎", "πŸ“’", "🧠", "βœ…"}
def filter_emojis(text):
    """Return *text* with all emoji removed except the whitelisted ones.

    A character is dropped only when it is a known emoji (per
    emoji.EMOJI_DATA) and not present in allowed_emojis.
    """
    kept = []
    for ch in text:
        if ch in emoji.EMOJI_DATA and ch not in allowed_emojis:
            continue  # disallowed emoji — drop it
        kept.append(ch)
    return "".join(kept)
# Save/load: auto-saved conversations live in CHAT_DIR as JSON files.
CHAT_DIR = "saved_chats"
os.makedirs(CHAT_DIR, exist_ok=True)
def save_chat_auto(history):
    """Persist *history* to a timestamped JSON file and return its filename.

    The filename is derived from the first five words of the first user
    message (punctuation stripped), falling back to "Chat".
    """
    first_prompt = "Chat"
    for msg in history:
        if msg["role"] == "user":
            first_prompt = msg["content"]
            break
    cleaned = re.sub(r"[^\w\s]", "", first_prompt).strip()
    words = cleaned.split()[:5]
    title = " ".join(words) if words else "Chat"
    stamp = datetime.now().strftime("%b %d %Y %H-%M-%S")
    filename = f"{title} - {stamp}.json"
    path = os.path.join(CHAT_DIR, filename)
    with open(path, "w", encoding="utf-8") as fh:
        json.dump(history, fh, indent=2, ensure_ascii=False)
    return filename
def list_saved_chats():
    """Return the saved-chat JSON filenames in CHAT_DIR, sorted by name."""
    json_files = (name for name in os.listdir(CHAT_DIR) if name.endswith(".json"))
    return sorted(json_files)
def load_chat_file(filename):
    """Load a saved chat JSON file from CHAT_DIR.

    Returns a pair: (chatbot display tuples, raw role/content history).
    Any failure — missing file, bad JSON, malformed messages — is logged
    and yields two empty lists.
    """
    try:
        path = os.path.join(CHAT_DIR, filename)
        with open(path, "r", encoding="utf-8") as fh:
            history = json.load(fh)
        # Chatbot tuple format: (user text, None) or (None, assistant text).
        pairs = []
        for msg in history:
            if msg["role"] == "user":
                pairs.append((msg["content"], None))
            else:
                pairs.append((None, msg["content"]))
        return pairs, history
    except Exception as exc:
        print(f"Load error: {exc}")
        return [], []
# Chat handler
def chat_with_groq(message, history):
    """Send *message* plus prior *history* to the Groq chat-completions API.

    Returns the 4-tuple Gradio expects: (cleared input box, chatbot display
    tuples, updated history, refreshed saved-chat sidebar). On any failure
    an error bubble is shown and the existing history is returned unchanged.
    """
    try:
        messages = [{"role": "system", "content": "You are Neobot – a helpful, professional assistant."}]
        messages += history + [{"role": "user", "content": message}]
        headers = {"Authorization": f"Bearer {groq_key}", "Content-Type": "application/json"}
        payload = {"model": "llama3-70b-8192", "messages": messages}
        # timeout= prevents the UI hanging forever on a stalled request;
        # raise_for_status() surfaces auth/quota errors clearly instead of a
        # confusing KeyError when the error body lacks "choices".
        res = requests.post(
            "https://api.groq.com/openai/v1/chat/completions",
            headers=headers,
            json=payload,
            timeout=60,
        )
        res.raise_for_status()
        reply = filter_emojis(res.json()["choices"][0]["message"]["content"])
        # Build a new list instead of mutating the caller's gr.State list
        # in place (in-place += aliases the session state object).
        history = history + [
            {"role": "user", "content": message},
            {"role": "assistant", "content": reply},
        ]
        save_chat_auto(history)
        chat_display = [
            (m["content"], None) if m["role"] == "user" else (None, m["content"])
            for m in history
        ]
        return "", chat_display, history, refresh_chat_list()
    except Exception as e:
        print(f"Chat error: {e}")
        return "", [(None, "❌ Error occurred while responding.")], history, refresh_chat_list()
# Transcribe audio
def transcribe_audio(audio_path):
    """Transcribe an audio file to text with Whisper.

    Returns "" when *audio_path* is falsy or does not exist, the transcript
    text on success, and an error marker string on any failure.
    """
    if not audio_path or not os.path.exists(audio_path):
        return ""
    # Normalise any input format (mp3, m4a, ...) to WAV for Whisper.
    temp_wav = f"{uuid.uuid4()}.wav"
    try:
        AudioSegment.from_file(audio_path).export(temp_wav, format="wav")
        result = whisper_model.transcribe(temp_wav)
        return result["text"]
    except Exception:
        return "❌ Transcription failed"
    finally:
        # Always remove the temp file — the original leaked it whenever
        # export/transcribe raised.
        if os.path.exists(temp_wav):
            os.remove(temp_wav)
# UI — Gradio Blocks layout, event wiring, and app launch.
with gr.Blocks(css="""
body { background: white; font-family: 'Segoe UI', sans-serif; }
.gr-button, .gr-textbox { background: #fff; color: #000; }
textarea, input[type='text'] { border: 1px solid #ccc; border-radius: 20px; padding: 10px; height: 48px; }
.sidebar { background: #f5f5f5; height: 100%; overflow-y: auto; padding: 10px; }
.sidebar button { width: 100%; margin: 5px 0; text-align: left; border: 1px solid #ccc; border-radius: 5px; background: white; }
""") as demo:
    # Per-session state: full role/content message history, and a flag for
    # the mic "recording" toggle (no real audio capture — see mic.click below).
    state = gr.State([])
    recording = gr.State(False)
    with gr.Row():
        # Left sidebar: "new chat" button plus the saved-chat list container.
        with gr.Column(scale=1):
            gr.Markdown("<h2 style='text-align:center;'>Chats</h2>")
            new_chat = gr.Button("πŸ†• New Chat")
            saved_chats_container = gr.Column(elem_id="chat_list")
        # Main panel: chat display and the input row.
        with gr.Column(scale=3):
            gr.Markdown("<h1 style='text-align:center;'>Neobot</h1>")
            chatbot = gr.Chatbot(height=400, label="Neobot")
            with gr.Row():
                chat_input = gr.Textbox(placeholder="Type or speak here...", scale=8, show_label=False)
                plus = gr.Button("βž•")
                mic = gr.Button("πŸŽ™οΈ")
                file_input = gr.File(file_types=[".mp3", ".wav"], visible=False)
                send = gr.Button("Send πŸš€")
    # Send message via button click or Enter in the textbox; both routes go
    # through chat_with_groq and refresh the sidebar.
    send.click(chat_with_groq, inputs=[chat_input, state], outputs=[chat_input, chatbot, state, saved_chats_container])
    chat_input.submit(chat_with_groq, inputs=[chat_input, state], outputs=[chat_input, chatbot, state, saved_chats_container])
    def refresh_chat_list():
        # Build one button per saved chat file, label = filename minus ".json".
        # NOTE(review): returning a fresh list of Buttons to a gr.Column output
        # is unlikely to re-render on most Gradio versions, and .click()
        # bindings created inside a callback after launch are generally not
        # registered — confirm this sidebar actually updates at runtime.
        files = list_saved_chats()
        buttons = []
        for file in files:
            btn = gr.Button(value=file[:-5], scale=1)
            btn.click(fn=load_chat_file, inputs=[gr.State(file)], outputs=[chatbot, state])
            buttons.append(btn)
        return buttons
    def start_stop_recording(is_recording):
        # Toggle placeholder text only; no audio is captured here.
        if not is_recording:
            return "Recording...", True
        else:
            return "", False
    def handle_audio_transcription(audio):
        # Thin wrapper over transcribe_audio (not referenced by the wiring below).
        return transcribe_audio(audio)
    # Reset input box, chat display, and history state.
    new_chat.click(lambda: ("", [], []), outputs=[chat_input, chatbot, state])
    # Reveal the hidden audio-file upload widget.
    plus.click(lambda: gr.update(visible=True), outputs=[file_input])
    # Transcribe an uploaded audio file straight into the input box.
    file_input.change(transcribe_audio, inputs=file_input, outputs=chat_input)
    mic.click(start_stop_recording, inputs=recording, outputs=[chat_input, recording])
    # Populate the saved-chats sidebar once on page load.
    demo.load(refresh_chat_list, outputs=[saved_chats_container])
demo.launch()