# Neobot: a Gradio chatbot backed by the Groq chat-completions API, with
# Whisper voice input and automatic chat persistence to JSON files.
import os

# openai-whisper is installed at runtime (convenient on hosted demos); note
# that pydub below also needs the ffmpeg binary available on the system.
os.system("pip install -q openai-whisper")

import gradio as gr
import json, re, uuid
from datetime import datetime
from pydub import AudioSegment
import whisper
import requests
import emoji

# Load Groq API key and Whisper model
groq_key = os.getenv("GROQ_API_KEY")  # API calls fail with 401 if this is unset
whisper_model = whisper.load_model("base")  # "base" trades accuracy for speed

# Emoji filtering: strip every emoji except a small whitelist.
allowed_emojis = {"😊", "😄", "👍", "🤖", "✨", "🎉", "💬", "🙌", "😎", "📒", "🧠", "✅"}
def filter_emojis(text):
    # Keep a character unless it is an emoji outside the whitelist.
    return "".join(ch for ch in text if ch not in emoji.EMOJI_DATA or ch in allowed_emojis)

# Save/load
CHAT_DIR = "saved_chats"
os.makedirs(CHAT_DIR, exist_ok=True)

def save_chat_auto(history):
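    # Title the file after the first user prompt (punctuation stripped, first
    # five words kept) plus a timestamp so filenames stay unique.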
    prompt = next((m["content"] for m in history if m["role"] == "user"), "Chat")
    title = re.sub(r"[^\w\s]", "", prompt).strip()
    title = " ".join(title.split()[:5]) or "Chat"
    timestamp = datetime.now().strftime("%b %d %Y %H-%M-%S")
    filename = f"{title} - {timestamp}.json"
    with open(os.path.join(CHAT_DIR, filename), "w", encoding="utf-8") as f:
        json.dump(history, f, indent=2, ensure_ascii=False)
    return filename

def list_saved_chats():
    return sorted([f for f in os.listdir(CHAT_DIR) if f.endswith(".json")])

def load_chat_file(filename):
    try:
        with open(os.path.join(CHAT_DIR, filename), "r", encoding="utf-8") as f:
            history = json.load(f)
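        # gr.Chatbot displays (user, assistant) tuples; map each role dict accordingly.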
        chat_display = [(m["content"], None) if m["role"] == "user" else (None, m["content"]) for m in history]
        return chat_display, history
    except Exception as e:
        print(f"Load error: {e}")
        return [], []

# Chat handler
def chat_with_groq(message, history):
    try:
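        # Seed each request with the bot persona, then replay the full history
        # so the model sees the whole conversation.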
        messages = [{"role": "system", "content": "You are Neobot – a helpful, professional assistant."}]
        messages += history + [{"role": "user", "content": message}]

        # Groq's chat-completions endpoint is OpenAI-compatible: the payload is
        # a model id plus the running message list.
        headers = {"Authorization": f"Bearer {groq_key}", "Content-Type": "application/json"}
        payload = {"model": "llama3-70b-8192", "messages": messages}

        res = requests.post("https://api.groq.com/openai/v1/chat/completions",
                            headers=headers, json=payload, timeout=60)
        res.raise_for_status()  # surface HTTP errors instead of a KeyError below
        reply = res.json()["choices"][0]["message"]["content"]
        reply = filter_emojis(reply)

        # Record both sides of the turn, auto-save, then rebuild the display tuples.
        history += [{"role": "user", "content": message}, {"role": "assistant", "content": reply}]
        save_chat_auto(history)
        chat_display = [(m["content"], None) if m["role"] == "user" else (None, m["content"]) for m in history]
        return "", chat_display, history, refresh_chat_list()
    except Exception as e:
        print(f"Chat error: {e}")
        return "", [(None, "❌ Error occurred while responding.")], history, refresh_chat_list()

# Transcribe audio
def transcribe_audio(audio_path):
    if not audio_path or not os.path.exists(audio_path):
        return ""
    # Convert whatever pydub can read into a temporary WAV for Whisper, and
    # remove the temp file even if transcription fails.
    temp_wav = f"{uuid.uuid4()}.wav"
    try:
        AudioSegment.from_file(audio_path).export(temp_wav, format="wav")
        result = whisper_model.transcribe(temp_wav)
        return result["text"]
    except Exception:
        return "❌ Transcription failed"
    finally:
        if os.path.exists(temp_wav):
            os.remove(temp_wav)

# UI
with gr.Blocks(css="""
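/* Flat light theme: white canvas, rounded inputs, scrollable chat sidebar. */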
body { background: white; font-family: 'Segoe UI', sans-serif; }
.gr-button, .gr-textbox { background: #fff; color: #000; }
textarea, input[type='text'] { border: 1px solid #ccc; border-radius: 20px; padding: 10px; height: 48px; }
.sidebar { background: #f5f5f5; height: 100%; overflow-y: auto; padding: 10px; }
.sidebar button { width: 100%; margin: 5px 0; text-align: left; border: 1px solid #ccc; border-radius: 5px; background: white; }
""") as demo:

    # Session state: the running message history (OpenAI-style role dicts)
    # and a boolean flag for the mic toggle.
    state = gr.State([])
    recording = gr.State(False)

    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("<h2 style='text-align:center;'>Chats</h2>")
            new_chat = gr.Button("🆕 New Chat")
            # Saved chats are listed in a Radio: a callback can refresh its
            # choices, whereas new Button widgets cannot be created on the fly.
            saved_chats_container = gr.Radio(choices=[], show_label=False, elem_id="chat_list")

        with gr.Column(scale=3):
            gr.Markdown("<h1 style='text-align:center;'>Neobot</h1>")
            chatbot = gr.Chatbot(height=400, label="Neobot")
            with gr.Row():
                chat_input = gr.Textbox(placeholder="Type or speak here...", scale=8, show_label=False)
                plus = gr.Button("➕")
                mic = gr.Button("🎙️")
            file_input = gr.File(file_types=[".mp3", ".wav"], visible=False)

    # Both the Send button and pressing Enter route through the same handler.
    send = gr.Button("Send 🚀")
    send.click(chat_with_groq, inputs=[chat_input, state], outputs=[chat_input, chatbot, state, saved_chats_container])
    chat_input.submit(chat_with_groq, inputs=[chat_input, state], outputs=[chat_input, chatbot, state, saved_chats_container])

    def refresh_chat_list():
        # Components cannot be created inside a Gradio callback, so the
        # sidebar Radio simply gets a fresh list of choices.
        return gr.update(choices=list_saved_chats())

    # .input fires only on user interaction, so programmatic choice refreshes
    # above do not re-trigger a load (Gradio 4.x behaviour).
    saved_chats_container.input(load_chat_file, inputs=saved_chats_container, outputs=[chatbot, state])

    def start_stop_recording(is_recording):
        # Placeholder toggle: the mic button only swaps the textbox text and
        # does not record audio; file upload via ➕ is the working voice path.
        if not is_recording:
            return "Recording...", True
        return "", False

    # Wiring: 🆕 resets the conversation, ➕ reveals the audio uploader, and
    # any uploaded file is transcribed straight into the textbox.
    new_chat.click(lambda: ("", [], []), outputs=[chat_input, chatbot, state])
    plus.click(lambda: gr.update(visible=True), outputs=[file_input])
    file_input.change(transcribe_audio, inputs=file_input, outputs=chat_input)
    mic.click(start_stop_recording, inputs=recording, outputs=[chat_input, recording])

    # Populate the saved-chat sidebar when the page first loads.
    demo.load(refresh_chat_list, outputs=[saved_chats_container])

demo.launch()
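
# Hosted runtimes (e.g. Hugging Face Spaces) execute this script top to bottom,
# so launch() starts the app directly; locally, share=True would expose a public link.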