Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -41,25 +41,30 @@ def load_chat_file(filename):
|
|
41 |
history = json.load(f)
|
42 |
chat_display = [(m["content"], None) if m["role"] == "user" else (None, m["content"]) for m in history]
|
43 |
return chat_display, history
|
44 |
-
except:
|
|
|
45 |
return [], []
|
46 |
|
47 |
# Chat handler
|
48 |
def chat_with_groq(message, history):
|
49 |
-
|
50 |
-
|
|
|
51 |
|
52 |
-
|
53 |
-
|
54 |
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
|
|
|
|
|
|
|
63 |
|
64 |
# Transcribe audio
|
65 |
def transcribe_audio(audio_path):
|
@@ -89,7 +94,7 @@ textarea, input[type='text'] { border: 1px solid #ccc; border-radius: 20px; padd
|
|
89 |
with gr.Column(scale=1):
|
90 |
gr.Markdown("<h2 style='text-align:center;'>Chats</h2>")
|
91 |
new_chat = gr.Button("π New Chat")
|
92 |
-
|
93 |
|
94 |
with gr.Column(scale=3):
|
95 |
gr.Markdown("<h1 style='text-align:center;'>Neobot</h1>")
|
@@ -98,35 +103,35 @@ textarea, input[type='text'] { border: 1px solid #ccc; border-radius: 20px; padd
|
|
98 |
chat_input = gr.Textbox(placeholder="Type or speak here...", scale=8, show_label=False)
|
99 |
plus = gr.Button("β")
|
100 |
mic = gr.Button("ποΈ")
|
101 |
-
hidden_audio = gr.Audio(type="filepath", visible=False)
|
102 |
file_input = gr.File(file_types=[".mp3", ".wav"], visible=False)
|
103 |
|
104 |
send = gr.Button("Send π")
|
105 |
-
send.click(chat_with_groq, inputs=[chat_input, state], outputs=[chat_input, chatbot, state,
|
106 |
-
chat_input.submit(chat_with_groq, inputs=[chat_input, state], outputs=[chat_input, chatbot, state,
|
107 |
|
108 |
def refresh_chat_list():
|
109 |
-
|
110 |
-
|
111 |
-
|
112 |
-
|
113 |
-
|
|
|
|
|
114 |
|
115 |
def start_stop_recording(is_recording):
|
116 |
if not is_recording:
|
117 |
-
return "Recording...", True
|
118 |
else:
|
119 |
-
return "", False
|
120 |
|
121 |
-
def
|
122 |
-
return transcribe_audio(audio)
|
123 |
|
124 |
new_chat.click(lambda: ("", [], []), outputs=[chat_input, chatbot, state])
|
125 |
plus.click(lambda: gr.update(visible=True), outputs=[file_input])
|
126 |
file_input.change(transcribe_audio, inputs=file_input, outputs=chat_input)
|
127 |
-
mic.click(start_stop_recording, inputs=recording, outputs=[chat_input, recording
|
128 |
-
hidden_audio.change(handle_audio, inputs=hidden_audio, outputs=[chat_input, hidden_audio])
|
129 |
|
130 |
-
demo.load(refresh_chat_list, outputs=[
|
131 |
|
132 |
-
demo.launch()
|
|
|
41 |
history = json.load(f)
|
42 |
chat_display = [(m["content"], None) if m["role"] == "user" else (None, m["content"]) for m in history]
|
43 |
return chat_display, history
|
44 |
+
except Exception as e:
|
45 |
+
print(f"Load error: {e}")
|
46 |
return [], []
|
47 |
|
48 |
# Chat handler
|
49 |
def chat_with_groq(message, history):
    """Send *message* plus the prior *history* to the Groq chat API.

    Parameters
    ----------
    message : str
        The user's new message from the textbox.
    history : list[dict]
        Prior turns as OpenAI-style {"role", "content"} dicts.

    Returns
    -------
    tuple
        (cleared textbox value, chatbot display pairs, updated history,
        refreshed saved-chat list) — matching the Gradio outputs wiring.
    """
    try:
        messages = [{"role": "system", "content": "You are Neobot β a helpful, professional assistant."}]
        messages += history + [{"role": "user", "content": message}]

        headers = {"Authorization": f"Bearer {groq_key}", "Content-Type": "application/json"}
        payload = {"model": "llama3-70b-8192", "messages": messages}

        # timeout prevents the UI from hanging forever on a stalled connection;
        # raise_for_status surfaces HTTP errors into the except branch below
        # instead of failing later with a confusing KeyError on res.json().
        res = requests.post(
            "https://api.groq.com/openai/v1/chat/completions",
            headers=headers,
            json=payload,
            timeout=30,
        )
        res.raise_for_status()
        reply = res.json()["choices"][0]["message"]["content"]
        reply = filter_emojis(reply)

        # Persist the turn and rebuild the (user, bot) pair list the Chatbot
        # component expects: user messages in slot 0, assistant in slot 1.
        history += [{"role": "user", "content": message}, {"role": "assistant", "content": reply}]
        save_chat_auto(history)
        chat_display = [(m["content"], None) if m["role"] == "user" else (None, m["content"]) for m in history]
        return "", chat_display, history, refresh_chat_list()
    except Exception as e:
        # Any failure (network, HTTP error, bad payload) degrades to an
        # in-chat error bubble rather than crashing the handler.
        print(f"Chat error: {e}")
        return "", [(None, "β Error occurred while responding.")], history, refresh_chat_list()
|
68 |
|
69 |
# Transcribe audio
|
70 |
def transcribe_audio(audio_path):
|
|
|
94 |
with gr.Column(scale=1):
|
95 |
gr.Markdown("<h2 style='text-align:center;'>Chats</h2>")
|
96 |
new_chat = gr.Button("π New Chat")
|
97 |
+
saved_chats_container = gr.Column(elem_id="chat_list")
|
98 |
|
99 |
with gr.Column(scale=3):
|
100 |
gr.Markdown("<h1 style='text-align:center;'>Neobot</h1>")
|
|
|
103 |
chat_input = gr.Textbox(placeholder="Type or speak here...", scale=8, show_label=False)
|
104 |
plus = gr.Button("β")
|
105 |
mic = gr.Button("ποΈ")
|
|
|
106 |
file_input = gr.File(file_types=[".mp3", ".wav"], visible=False)
|
107 |
|
108 |
send = gr.Button("Send π")
|
109 |
+
send.click(chat_with_groq, inputs=[chat_input, state], outputs=[chat_input, chatbot, state, saved_chats_container])
|
110 |
+
chat_input.submit(chat_with_groq, inputs=[chat_input, state], outputs=[chat_input, chatbot, state, saved_chats_container])
|
111 |
|
112 |
def refresh_chat_list():
    """Rebuild the sidebar: one button per saved chat file.

    Each button is labelled with the filename minus its ".json" suffix and,
    when clicked, loads that conversation into the chatbot and state.
    NOTE(review): components are created inside a callback here — confirm
    the installed Gradio version actually renders them dynamically.
    """
    chat_buttons = []
    for saved_name in list_saved_chats():
        # Drop the 5-char ".json" extension for the visible label.
        chat_btn = gr.Button(value=saved_name[:-5], scale=1)
        chat_btn.click(fn=load_chat_file, inputs=[gr.State(saved_name)], outputs=[chatbot, state])
        chat_buttons.append(chat_btn)
    return chat_buttons
|
120 |
|
121 |
def start_stop_recording(is_recording):
    """Toggle the mic recording flag.

    Returns a (textbox text, new flag) pair: starting a recording shows a
    "Recording..." hint in the input box; stopping clears it.
    """
    return ("", False) if is_recording else ("Recording...", True)
|
126 |
|
127 |
+
def handle_audio_transcription(audio):
    """Forward a recorded audio clip to the shared transcription helper."""
    transcript = transcribe_audio(audio)
    return transcript
|
129 |
|
130 |
# --- Event wiring ---
# "New Chat" clears the input box, the visible chat, and the stored history.
new_chat.click(lambda: ("", [], []), outputs=[chat_input, chatbot, state])
# "+" reveals the hidden file-upload widget for audio files.
plus.click(lambda: gr.update(visible=True), outputs=[file_input])
# An uploaded audio file is transcribed straight into the text input.
file_input.change(transcribe_audio, inputs=file_input, outputs=chat_input)
# Mic button toggles the recording flag and swaps the textbox hint.
mic.click(start_stop_recording, inputs=recording, outputs=[chat_input, recording])

# Populate the saved-chat sidebar once when the page loads.
demo.load(refresh_chat_list, outputs=[saved_chats_container])

demo.launch()
|