Update app.py
app.py CHANGED
@@ -9,20 +9,18 @@ import whisper
 import requests
 import emoji
 
-# Load API key
+# Load API key and model
 groq_key = os.getenv("GROQ_API_KEY")
 whisper_model = whisper.load_model("base")
 
-# ✅ Safe emoji filtering
 def filter_emojis(text):
     allowed_emojis = {"😊", "😂", "👍", "🤖", "✨", "🎉", "💬", "🙌", "🙏", "😢", "🧠", "✅"}
     return "".join(char if char not in emoji.EMOJI_DATA or char in allowed_emojis else "" for char in text)
 
-# ✅ Chat handler
 def chat_with_groq(message, history):
-    messages = [{"role": "system", "content": "You are
+    messages = [{"role": "system", "content": "You are NovaBot 🤖 - helpful, professional, and emoji-friendly."}]
     messages += history + [{"role": "user", "content": message}]
-
+
     headers = {
         "Authorization": f"Bearer {groq_key}",
         "Content-Type": "application/json"
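The hunk stops at the request headers, so the actual call to Groq is not visible here. For orientation, a minimal sketch of how the rest of the handler typically talks to Groq's OpenAI-compatible chat-completions endpoint is shown below; the helper name, model choice, timeout, and error handling are assumptions rather than lines from this commit.

# Sketch (not from the commit): completing the request that chat_with_groq
# presumably makes after building `messages` and `headers`.
import requests

GROQ_URL = "https://api.groq.com/openai/v1/chat/completions"  # Groq's OpenAI-compatible endpoint

def groq_reply(messages, groq_key, model="llama-3.1-8b-instant"):  # model name is an assumption
    headers = {
        "Authorization": f"Bearer {groq_key}",
        "Content-Type": "application/json",
    }
    resp = requests.post(GROQ_URL, headers=headers, json={"model": model, "messages": messages}, timeout=60)
    resp.raise_for_status()
    # OpenAI-style response: the assistant text sits in the first choice
    return resp.json()["choices"][0]["message"]["content"]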
@@ -40,7 +38,6 @@ def chat_with_groq(message, history):
     history += [{"role": "user", "content": message}, {"role": "assistant", "content": reply}]
     return "", history, history
 
-# ✅ Transcription
 def transcribe_audio(audio_path):
     if audio_path is None or not os.path.exists(audio_path):
         return "⚠️ No audio input."
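chat_with_groq's three return values above line up with the event wiring further down (outputs=[chat_input, chatbot, state]): the empty string clears the textbox, and the same messages-format history feeds both the Chatbot and the State. With type="messages", gr.Chatbot consumes the same OpenAI-style role/content dicts the handler already builds, roughly:

# Illustrative only: the history structure carried by both the chatbot and the state
history = [
    {"role": "user", "content": "hi"},
    {"role": "assistant", "content": "Hello! How can I help? ✨"},
]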
@@ -53,7 +50,6 @@ def transcribe_audio(audio_path):
     except Exception as e:
         return f"❌ Transcription error: {e}"
 
-# ✅ Save/load
 def save_session(history):
     prompt = next((m["content"] for m in history if m["role"] == "user"), "chat")
     title = re.sub(r"[^\w\s]", "", prompt).strip()
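save_session, load_chat, and the list_saved_files helper referenced by the UI below appear only partially in this diff. A minimal sketch of the storage side, assuming sessions are kept as JSON files in a local folder; the folder name, file format, and status messages are assumptions, not shown by the commit:

import json
import os

SESSIONS_DIR = "sessions"  # assumed location

def list_saved_files():
    # Names shown in the "Saved Conversations" dropdown
    os.makedirs(SESSIONS_DIR, exist_ok=True)
    return sorted(f[:-5] for f in os.listdir(SESSIONS_DIR) if f.endswith(".json"))

def load_chat(name):
    # Returns (chatbot history, state, status message); the error branch below
    # matches the one visible at the top of the next hunk.
    try:
        with open(os.path.join(SESSIONS_DIR, f"{name}.json"), encoding="utf-8") as f:
            history = json.load(f)
        return history, history, f"📥 Loaded: {name}"
    except Exception as e:
        return [], [], f"❌ Load error: {e}"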
@@ -76,59 +72,54 @@ def load_chat(name):
     except Exception as e:
         return [], [], f"❌ Load error: {e}"
 
-#
+# Gradio Interface
 with gr.Blocks(css="""
-body { background-color: #
-.gr-chatbot { background-color: #
-.gr-chatbot-message { background-color: #
+body { background-color: #fff; color: #000; font-family: 'Segoe UI', sans-serif; }
+.gr-chatbot { background-color: #f9f9f9; border: 1px solid #ccc; border-radius: 10px; }
+.gr-chatbot-message { background-color: #f0f0f0; color: #000; border-radius: 10px; margin-bottom: 5px; }
 textarea, input[type='text'] {
-background-color: #
-border: 1px solid #
+background-color: #fff; color: #000; border-radius: 20px;
+border: 1px solid #ccc; padding: 10px 15px; height: 48px;
 }
 .gr-button {
-background-color: #
-border: 1px solid #
-font-weight: bold; transition: background-color 0.2s;
+background-color: #eaeaea !important; color: #000 !important;
+border: 1px solid #ccc !important; border-radius: 8px;
 }
-.gr-button:hover { background-color: #
-.gr-dropdown { background-color: #
-h1, h2, h3, .gr-markdown { color: #f1f1f1 !important; text-align: center; }
+.gr-button:hover { background-color: #ddd !important; cursor: pointer; }
+.gr-dropdown { background-color: #fff !important; color: #000 !important; max-height: 200px; overflow-y: auto; }
+h1, h2, h3, .gr-markdown { color: #000 !important; }
 """) as demo:
 
     state = gr.State([])
 
-    gr.Markdown("# ✨ JAWERIA'SBOT 🤖")
-    gr.Markdown("Type or speak - your assistant listens and responds 💬")
-
-    chatbot = gr.Chatbot(label="JAWERIA'SBOT 🤖", height=400, type="messages")
-
     with gr.Row():
+        with gr.Column(scale=1):
+            gr.Markdown("## 🗂️ Chats")
+            new_chat_btn = gr.Button("🆕 New Chat")
+            saved_dropdown = gr.Dropdown(label="📁 Saved Conversations", choices=list_saved_files(), interactive=True)
+            load_btn = gr.Button("📥 Load Chat")
+            save_btn = gr.Button("💾 Save Chat")
+            save_msg = gr.Markdown()
+            load_msg = gr.Markdown()
+
+        with gr.Column(scale=3):
+            gr.Markdown("# 🤖 NovaBot")
+            chatbot = gr.Chatbot(label="NovaBot Chat", height=400, type="messages")
+
+            with gr.Row():
+                chat_input = gr.Textbox(placeholder="Type your message...", scale=8, show_label=False)
+                record_btn = gr.Button("🎙️", scale=1)
+                upload_btn = gr.File(file_types=[".mp3", ".wav", ".m4a"], label="📁 Upload", visible=False)
+
+            send_btn = gr.Button("Send 🚀")
+            recording = gr.Audio(type="filepath", visible=False)
+
+    # Events
     send_btn.click(chat_with_groq, inputs=[chat_input, state], outputs=[chat_input, chatbot, state])
     chat_input.submit(chat_with_groq, inputs=[chat_input, state], outputs=[chat_input, chatbot, state])
     recording.change(transcribe_audio, inputs=recording, outputs=chat_input)
     upload_btn.change(transcribe_audio, inputs=upload_btn, outputs=chat_input)
 
-    def toggle_recording(current):
-        return gr.update(visible=not current)
-
     record_btn.click(lambda: gr.update(visible=True), None, recording)
 
    new_chat_btn.click(fn=lambda: ("", [], []), outputs=[chat_input, chatbot, state])
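The hunk wires the chat box, the transcription inputs, and the record button, but not the save/load buttons or the dropdown defined in the left column. Judging from load_chat's three return values and the save_msg/load_msg placeholders, that wiring presumably sits outside the changed lines and would look roughly like this; the exact inputs/outputs pairing is an assumption:

# Hypothetical wiring, inferred from the components above; not shown in this diff.
save_btn.click(save_session, inputs=state, outputs=save_msg)
load_btn.click(load_chat, inputs=saved_dropdown, outputs=[chatbot, state, load_msg])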
@@ -138,4 +129,3 @@ h1, h2, h3, .gr-markdown { color: #f1f1f1 !important; text-align: center; }
     demo.load(fn=list_saved_files, outputs=[saved_dropdown])
 
 demo.launch()
-