Update app.py
app.py CHANGED
@@ -4,6 +4,8 @@ import edge_tts
 import time
 import os
 import uuid
+import re
+import html
 import firebase_admin
 from firebase_admin import credentials, firestore
 from openai import OpenAI
@@ -153,6 +155,22 @@ def display_chat_history():
     st.markdown('<div class="chat-history-wrapper">' + "".join(chat_msgs) + '</div>', unsafe_allow_html=True)
     st.markdown('<div id="chat-top-anchor"></div>', unsafe_allow_html=True)

+# --- TTS sanitize ---
+def sanitize_for_tts(text):
+    text = html.unescape(text)
+    text = re.sub(r'[^\x00-\x7F]+', ' ', text)
+    text = re.sub(r'\[([^\]]+)\]\([^\)]+\)', r'\1', text)
+    text = re.sub(r'(\*\*|__)(.*?)\1', r'\2', text)
+    text = re.sub(r'(\*|_)(.*?)\1', r'\2', text)
+    text = re.sub(r'^#{1,6}\s+', '', text, flags=re.MULTILINE)
+    text = re.sub(r'^\s*[-*+]\s+', ' • ', text, flags=re.MULTILINE)
+    text = re.sub(r'^\s*\d+\.\s+', ' • ', text, flags=re.MULTILINE)
+    text = re.sub(r'[!?]{2,}', '.', text)
+    text = re.sub(r'\.{3,}', '.', text)
+    text = re.sub(r'\n{2,}', '. ', text)
+    text = re.sub(r'\s+', ' ', text).strip()
+    return text
+
 # --- Edge TTS synth ---
 async def edge_tts_synthesize(text, voice, user_id):
     out_path = f"output_{user_id}.mp3"
@@ -197,6 +215,7 @@ window.setTimeout(function(){
 </script>
 """, unsafe_allow_html=True)

+# --- Handle user input
 if user_input:
     thread_id = get_or_create_thread_id()
     client.beta.threads.messages.create(thread_id=thread_id, role="user", content=user_input)
@@ -216,7 +235,8 @@ if user_input:
     mute_voice = st.session_state.get("mute_voice", False)
     audio_path = None
     if not mute_voice and assistant_message.strip():
-
+        clean_text = sanitize_for_tts(assistant_message)
+        audio_path = synthesize_voice(clean_text, st.session_state["selected_voice"], user_id)
     st.session_state["last_audio_path"] = audio_path

     time.sleep(0.2)
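
For reference, here is what the new sanitize_for_tts pass does to a typical markdown-flavored assistant reply before it is handed to the synthesizer (the input string below is invented for illustration):

# Illustrative input only; sanitize_for_tts is the function added in this commit.
raw = "## Results\n\n**Great news!!** See [the docs](https://example.com)\n\n- item one\n- item two"
print(sanitize_for_tts(raw))
# -> Results. Great news. See the docs. • item one • item two
# Headings, bold markers, and the link target are stripped; list items become "•"
# (inserted after the non-ASCII strip, so the bullet survives); "!!" collapses to ".";
# blank lines turn into sentence breaks before whitespace is flattened.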
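
The diff truncates edge_tts_synthesize after the out_path line, and synthesize_voice (called in the last hunk) is not shown at all. A minimal sketch of how such a pair is commonly wired with the edge-tts package might look like the following; everything beyond the signature and out_path shown above is an assumption, not this app's actual code:

import asyncio
import edge_tts

async def edge_tts_synthesize(text, voice, user_id):
    out_path = f"output_{user_id}.mp3"
    # Communicate streams Edge TTS audio for the given voice; save() writes the MP3 to disk.
    communicate = edge_tts.Communicate(text, voice)
    await communicate.save(out_path)
    return out_path

def synthesize_voice(text, voice, user_id):
    # Hypothetical synchronous wrapper so the Streamlit script can call the async synthesizer.
    return asyncio.run(edge_tts_synthesize(text, voice, user_id))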