File size: 4,951 Bytes
d1b380c
 
f33a336
 
 
 
bea35f9
573da01
279522e
0cab78e
299800e
 
f33a336
40fe208
0cab78e
573da01
d1b380c
573da01
40fe208
0cab78e
 
f33a336
0cab78e
 
50e3328
 
5b4e1f9
bea35f9
40fe208
50e3328
0cab78e
50e3328
0cab78e
40fe208
0cab78e
40fe208
0cab78e
40fe208
0cab78e
89f35c4
ec2b48a
 
0cab78e
 
 
ec2b48a
 
 
 
 
 
 
 
 
 
f2047e9
ec2b48a
bea35f9
 
f33a336
 
0cab78e
f33a336
 
 
 
 
 
50e3328
40fe208
f33a336
0cab78e
279522e
40fe208
 
 
 
bea35f9
 
c675b2a
 
 
 
 
0cab78e
 
bea35f9
 
0cab78e
 
f2047e9
c675b2a
0cab78e
bea35f9
 
 
c675b2a
bea35f9
 
c675b2a
40fe208
bea35f9
 
 
 
 
 
c675b2a
bea35f9
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
import os
import json, re, uuid
from datetime import datetime
from pydub import AudioSegment
import whisper
import requests
import gradio as gr
import emoji

# Load Groq API key and Whisper model
# NOTE(review): groq_key is None when GROQ_API_KEY is unset — the Authorization
# header below would then read "Bearer None"; confirm the env var is set at deploy.
groq_key = os.getenv("GROQ_API_KEY")
# Whisper "base" model loads at import time (downloads weights on first run),
# so app startup blocks until the load completes.
whisper_model = whisper.load_model("base")

# Emoji filtering: strip every emoji character except an allow-listed set.
# NOTE(review): most entries below look mojibake'd (e.g. "πŸ˜„") — likely a
# file-encoding mishap; verify against the originally intended emoji. The
# filter still works mechanically either way.
allowed_emojis = {"😊", "πŸ˜„", "πŸ‘", "πŸ€–", "✨", "πŸŽ‰", "πŸ’¬", "πŸ™Œ", "😎", "πŸ“’", "🧠", "βœ…"}
def filter_emojis(text):
    """Return *text* with every emoji removed except those in allowed_emojis."""
    kept = []
    for ch in text:
        # Drop the character only when it is a known emoji outside the allow-list.
        if ch in emoji.EMOJI_DATA and ch not in allowed_emojis:
            continue
        kept.append(ch)
    return "".join(kept)

# Save/load
# Chat histories are persisted as JSON files under this directory,
# created eagerly at import time if it does not exist.
CHAT_DIR = "saved_chats"
os.makedirs(CHAT_DIR, exist_ok=True)

def save_chat_auto(history):
    """Persist *history* to CHAT_DIR as pretty-printed JSON.

    The filename is derived from the first user message (up to five words),
    a timestamp, and a short random suffix to avoid collisions. Returns the
    generated filename.
    """
    first_prompt = "Chat"
    for msg in history:
        if msg["role"] == "user":
            first_prompt = msg["content"]
            break
    # Keep only word characters / whitespace, then take at most five words.
    cleaned = re.sub(r"[^\w\s]", "", first_prompt).strip()
    words = cleaned.split()[:5]
    title = " ".join(words) if words else "Chat"
    stamp = datetime.now().strftime("%b %d %Y %H-%M-%S")
    filename = f"{title} - {stamp} - {uuid.uuid4().hex[:6]}.json"
    target = os.path.join(CHAT_DIR, filename)
    with open(target, "w", encoding="utf-8") as fh:
        json.dump(history, fh, indent=2, ensure_ascii=False)
    return filename

def list_saved_chats():
    """Return the names of all saved-chat JSON files, sorted alphabetically."""
    json_files = (name for name in os.listdir(CHAT_DIR) if name.endswith(".json"))
    return sorted(json_files)

def load_chat_file(filename):
    """Read a saved chat from CHAT_DIR.

    Returns the history twice (for the chatbot display and the state output);
    on any failure, logs the error and returns two empty lists instead.
    """
    path = os.path.join(CHAT_DIR, filename)
    try:
        with open(path, "r", encoding="utf-8") as fh:
            history = json.load(fh)
    except Exception as e:
        # Best-effort load: a missing/corrupt file simply yields an empty chat.
        print(f"Load error: {e}")
        return [], []
    return history, history

def chat_with_groq(message, history):
    """Send *message* (with prior *history*) to the Groq chat API and append the reply.

    Returns a 3-tuple ("", updated_history, updated_history) matching the Gradio
    outputs: cleared input box, chatbot display, and state. On any failure an
    assistant-role error message is appended instead of a reply.
    """
    try:
        messages = [{"role": "system", "content": "You are Neobot – a helpful, professional assistant."}]
        messages += history + [{"role": "user", "content": message}]
        headers = {"Authorization": f"Bearer {groq_key}", "Content-Type": "application/json"}
        payload = {"model": "llama3-70b-8192", "messages": messages}
        # timeout= keeps the UI from hanging forever on a stalled connection
        # (the original request had no timeout at all).
        res = requests.post(
            "https://api.groq.com/openai/v1/chat/completions",
            headers=headers,
            json=payload,
            timeout=60,
        )
        # Surface HTTP errors (bad key, rate limit) as a readable message
        # instead of the cryptic KeyError: 'choices' the original produced.
        res.raise_for_status()
        reply = res.json()["choices"][0]["message"]["content"]
        reply = filter_emojis(reply)
        history += [{"role": "user", "content": message}, {"role": "assistant", "content": reply}]
        save_chat_auto(history)
        return "", history, history
    except Exception as e:
        error_msg = f"❌ Error: {str(e)}"
        return "", history + [{"role": "assistant", "content": error_msg}], history

def transcribe_audio(audio_path):
    """Transcribe the audio file at *audio_path* via the Whisper model.

    The input is first converted to a temporary WAV (Whisper-friendly format).
    Returns the transcript text, "" for a missing/empty path, or an error
    string when conversion or transcription fails.
    """
    if not audio_path or not os.path.exists(audio_path):
        return ""
    temp_wav = f"{uuid.uuid4()}.wav"
    try:
        AudioSegment.from_file(audio_path).export(temp_wav, format="wav")
        result = whisper_model.transcribe(temp_wav)
        return result["text"]
    except Exception:
        return "❌ Transcription failed"
    finally:
        # Always remove the intermediate WAV — the original leaked it whenever
        # export/transcribe raised, leaving stray files in the working dir.
        if os.path.exists(temp_wav):
            os.remove(temp_wav)

# UI
# Two-column Gradio layout: a sidebar listing saved chats plus a main panel
# with the chatbot, a text box, and two audio inputs (mic + file upload).
with gr.Blocks(css="""
body { background: white; font-family: 'Segoe UI', sans-serif; }
.gr-button, .gr-textbox { background: #fff; color: #000; }
textarea, input[type='text'] { border: 1px solid #ccc; border-radius: 20px; padding: 10px; height: 48px; }
.sidebar { background: #f5f5f5; height: 100%; overflow-y: auto; padding: 10px; }
.sidebar button { width: 100%; margin: 5px 0; text-align: left; border: 1px solid #ccc; border-radius: 8px; background: white; transition: all 0.2s ease; }
.sidebar button:hover { background: #eee; }
""") as demo:

    # Conversation history in OpenAI message format ({"role": ..., "content": ...}).
    state = gr.State([])
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("<h2 style='text-align:center;'>Chats</h2>")
            new_chat = gr.Button("πŸ†• New Chat")
            # Dropdown is seeded at build time; demo.load() refreshes it below.
            chat_selector = gr.Dropdown(choices=list_saved_chats(), label="Load Saved Chat")
            load_btn = gr.Button("πŸ“‚ Load")
        with gr.Column(scale=3):
            gr.Markdown("<h1 style='text-align:center;'>Neobot</h1>")
            chatbot = gr.Chatbot(height=400, label="Neobot", type="messages")
            with gr.Row():
                chat_input = gr.Textbox(placeholder="Type or speak here...", scale=8, show_label=False)
                send_btn = gr.Button("πŸš€ Send")
            # NOTE(review): gr.Audio(source=...) is the Gradio 3.x parameter name;
            # 4.x renamed it to sources=[...] — confirm the pinned gradio version.
            audio_input = gr.Audio(source="microphone", type="filepath", label="🎀 Voice Input")
            file_input = gr.File(file_types=[".mp3", ".wav"], label="Upload Audio")

    # Re-read the save directory so chats saved this session appear in the dropdown.
    def refresh_chat_list():
        return gr.update(choices=list_saved_chats())

    # New Chat clears the input box, chatbot display, and state history.
    new_chat.click(lambda: ("", [], []), outputs=[chat_input, chatbot, state])
    # Both the Send button and pressing Enter submit to chat_with_groq,
    # which returns (cleared_input, display_history, state_history).
    send_btn.click(chat_with_groq, inputs=[chat_input, state], outputs=[chat_input, chatbot, state])
    chat_input.submit(chat_with_groq, inputs=[chat_input, state], outputs=[chat_input, chatbot, state])
    # Either audio source (upload or mic) transcribes into the text box;
    # the user still presses Send to submit the transcript.
    file_input.change(transcribe_audio, inputs=file_input, outputs=[chat_input])
    audio_input.change(transcribe_audio, inputs=audio_input, outputs=[chat_input])
    load_btn.click(lambda name: load_chat_file(name), inputs=[chat_selector], outputs=[chatbot, state])
    # Populate the saved-chat dropdown when the page loads.
    demo.load(refresh_chat_list, outputs=[chat_selector])

demo.launch()