Update app.py
app.py CHANGED
@@ -4,6 +4,8 @@ import edge_tts
 import time
 import os
 import uuid
+import re
+import html
 import firebase_admin
 from firebase_admin import credentials, firestore
 from openai import OpenAI
@@ -64,7 +66,6 @@ st.markdown("""
        margin-top: 0.1em;
        position: relative;
    }
-
    .clear-chat-btn-top {
        position: absolute;
        top: 10px;
@@ -77,21 +78,17 @@ st.markdown("""
        z-index: 1000;
        transition: color 0.2s ease;
    }
-
    .clear-chat-btn-top:hover {
        color: #fff;
    }
-
    .stChatMessage { max-width: 85%; border-radius: 12px; padding: 8px; margin-bottom: 10px; }
    .stChatMessage[data-testid="stChatMessage-user"] { background: #f0f0f0; color: #000000; }
    .stChatMessage[data-testid="stChatMessage-assistant"] { background: #e3f2fd; color: #000000; }
-
    .chat-history-wrapper {
        margin-top: 0.5em;
-       padding-bottom: 9em;
+       padding-bottom: 9em;
        min-height: 60vh;
    }
-
    .input-bottom-bar {
        position: fixed;
        bottom: 3.5em;
@@ -181,6 +178,22 @@ def display_chat_history():
     st.markdown('<div class="chat-history-wrapper">' + "".join(chat_msgs) + '</div>', unsafe_allow_html=True)
     st.markdown('<div id="chat-top-anchor"></div>', unsafe_allow_html=True)
 
+# --- TTS sanitization ---
+def sanitize_for_tts(text):
+    text = html.unescape(text)
+    text = re.sub(r'[^\x00-\x7F]+', ' ', text)
+    text = re.sub(r'\[([^\]]+)\]\([^\)]+\)', r'\1', text)
+    text = re.sub(r'(\*\*|__)(.*?)\1', r'\2', text)
+    text = re.sub(r'(\*|_)(.*?)\1', r'\2', text)
+    text = re.sub(r'^#{1,6}\s+', '', text, flags=re.MULTILINE)
+    text = re.sub(r'^\s*[-*+]\s+', ' • ', text, flags=re.MULTILINE)
+    text = re.sub(r'^\s*\d+\.\s+', ' • ', text, flags=re.MULTILINE)
+    text = re.sub(r'[!?]{2,}', '.', text)
+    text = re.sub(r'\.{3,}', '.', text)
+    text = re.sub(r'\n{2,}', '. ', text)
+    text = re.sub(r'\s+', ' ', text).strip()
+    return text
+
 # --- Edge TTS synth ---
 async def edge_tts_synthesize(text, voice, user_id):
     out_path = f"output_{user_id}.mp3"
@@ -242,7 +255,8 @@ if user_input:
     mute_voice = st.session_state.get("mute_voice", False)
     audio_path = None
     if not mute_voice and assistant_message.strip():
-
+        clean_text = sanitize_for_tts(assistant_message)
+        audio_path = synthesize_voice(clean_text, st.session_state["selected_voice"], user_id)
     st.session_state["last_audio_path"] = audio_path
 
     time.sleep(0.2)
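
For reference, a minimal sketch of what the new sanitize_for_tts helper does to a markdown-formatted assistant reply before it reaches the TTS engine. The function body is copied from the diff above; the sample input and printed output are illustrative only and not part of the commit.

import html
import re

# Copied from the sanitize_for_tts added in this commit, with explanatory comments.
def sanitize_for_tts(text):
    text = html.unescape(text)                                         # decode entities such as &amp;
    text = re.sub(r'[^\x00-\x7F]+', ' ', text)                         # replace runs of non-ASCII with a space
    text = re.sub(r'\[([^\]]+)\]\([^\)]+\)', r'\1', text)              # [label](url) -> label
    text = re.sub(r'(\*\*|__)(.*?)\1', r'\2', text)                    # strip bold markers
    text = re.sub(r'(\*|_)(.*?)\1', r'\2', text)                       # strip italic markers
    text = re.sub(r'^#{1,6}\s+', '', text, flags=re.MULTILINE)         # strip heading hashes
    text = re.sub(r'^\s*[-*+]\s+', ' • ', text, flags=re.MULTILINE)    # bullet markers -> spoken bullet
    text = re.sub(r'^\s*\d+\.\s+', ' • ', text, flags=re.MULTILINE)    # numbered items -> spoken bullet
    text = re.sub(r'[!?]{2,}', '.', text)                              # "!!!" or "??" -> "."
    text = re.sub(r'\.{3,}', '.', text)                                # "..." -> "."
    text = re.sub(r'\n{2,}', '. ', text)                               # paragraph breaks -> sentence breaks
    text = re.sub(r'\s+', ' ', text).strip()                           # collapse all whitespace to one line
    return text

raw = "### Update\nYour order **#42** has shipped via [FedEx](https://fedex.com)!!!"
print(sanitize_for_tts(raw))
# -> Update Your order #42 has shipped via FedEx.

Because the whole string is collapsed to a single line of plain sentences, the voice reads headings, links, and list items as ordinary text instead of announcing markdown symbols or URLs.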
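The diff shows only the first line of edge_tts_synthesize, and the new call goes through a synthesize_voice wrapper that is defined outside this hunk. As an assumption about how those pieces fit together (not taken from the commit), a typical edge-tts synthesis path with a synchronous bridge for Streamlit would look roughly like this; edge_tts_synthesize's body and the hypothetical synthesize_voice shown here are sketches, not the file's actual code.

import asyncio
import edge_tts

# Assumed shape of the async synthesizer whose first line appears in the diff.
async def edge_tts_synthesize(text, voice, user_id):
    out_path = f"output_{user_id}.mp3"
    communicate = edge_tts.Communicate(text, voice)   # e.g. voice="en-US-JennyNeural"
    await communicate.save(out_path)                  # write the synthesized MP3 to disk
    return out_path

# Hypothetical synchronous wrapper matching the synthesize_voice call in the diff.
def synthesize_voice(text, voice, user_id):
    return asyncio.run(edge_tts_synthesize(text, voice, user_id))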