Update app.py

app.py CHANGED
@@ -3,6 +3,7 @@ import asyncio
import edge_tts
import time
import os
import uuid
import firebase_admin
from firebase_admin import credentials, firestore
@@ -19,34 +20,24 @@ openai_key = os.getenv("openai_key")
assistant_id = os.getenv("assistant_id")
client = OpenAI(api_key=openai_key)

-
- VOICE_OPTIONS = {
-     "Jenny (US, Female)": "en-US-JennyNeural",
-     "Ryan (UK, Male)": "en-GB-RyanNeural",
-     "Natasha (AU, Female)": "en-AU-NatashaNeural",
-     "William (AU, Male)": "en-AU-WilliamNeural",
-     "Libby (UK, Female)": "en-GB-LibbyNeural",
-     "Leah (SA, Female)": "en-ZA-LeahNeural",
-     "Luke (SA, Male)": "en-ZA-LukeNeural"
- }
-
- st.set_page_config(page_title="LOR Technologies AI Assistant", layout="wide")

# --- State setup
if "user_id" not in st.session_state:
    st.session_state["user_id"] = str(uuid.uuid4())
user_id = st.session_state["user_id"]

- if "mute_voice" not in st.session_state:
-     st.session_state["mute_voice"] = False
if "last_tts_text" not in st.session_state:
    st.session_state["last_tts_text"] = ""
if "last_audio_path" not in st.session_state:
    st.session_state["last_audio_path"] = ""
- if "selected_voice" not in st.session_state:
-     st.session_state["selected_voice"] = "Jenny (US, Female)"

- # ---
st.markdown("""
<style>
.block-container {padding-top: 1rem;}
@@ -64,21 +55,19 @@ st.markdown("""
.stChatMessage[data-testid="stChatMessage-user"] { background: #f0f0f0; color: #000000; }
.stChatMessage[data-testid="stChatMessage-assistant"] { background: #e3f2fd; color: #000000; }
.chat-history-wrapper {
-     margin-top: 0.5em;
}
- .input-top-bar {
-     position:
-     background: #191b22; padding: 0.
-     display: flex; align-items: center; gap: 0.
-     border-bottom: 1px solid #22232c;
}
- .input-
- .
- .clear-chat-btn { background: none; border: none; font-size: 1.4em; color: #666; cursor: pointer; }
</style>
""", unsafe_allow_html=True)

- # --- Top Branding
st.markdown("""
<div class="lor-brand-bar">
<img src="https://lortechnologies.com/wp-content/uploads/2023/03/LOR-Online-Logo.svg" class="logo-mini" />
@@ -86,30 +75,6 @@ st.markdown("""
</div>
""", unsafe_allow_html=True)

- # --- Sidebar: audio/voice controls
- with st.sidebar:
-     st.markdown("### Voice Settings & Controls")
-     selected_voice = st.selectbox(
-         "Select assistant voice", list(VOICE_OPTIONS.keys()),
-         index=list(VOICE_OPTIONS.keys()).index(st.session_state["selected_voice"])
-     )
-     st.session_state["selected_voice"] = selected_voice
-
-     last_audio = st.session_state.get("last_audio_path")
-     mute_voice = st.session_state.get("mute_voice", False)
-     if last_audio and os.path.exists(last_audio):
-         st.audio(last_audio, format="audio/mp3", autoplay=not mute_voice)
-         if st.button("🔁 Replay Voice"):
-             st.audio(last_audio, format="audio/mp3", autoplay=True)
-     if not mute_voice:
-         if st.button("🔇 Mute Voice"):
-             st.session_state["mute_voice"] = True
-             st.rerun()
-     else:
-         if st.button("🔊 Unmute Voice"):
-             st.session_state["mute_voice"] = False
-             st.rerun()
-
# --- Firestore helpers ---
def get_or_create_thread_id():
    doc_ref = db.collection("users").document(user_id)
@@ -140,7 +105,7 @@ def display_chat_history():
    messages = db.collection("users").document(user_id).collection("messages").order_by("timestamp").stream()
    assistant_icon_html = "<img src='https://raw.githubusercontent.com/AndrewLORTech/lortechwebsite/main/lorain.jpg' width='22' style='vertical-align:middle; border-radius:50%;'/>"
    chat_msgs = []
-     for msg in list(messages)
        data = msg.to_dict()
        if data["role"] == "user":
            chat_msgs.append(
@@ -151,57 +116,47 @@ def display_chat_history():
                f"<div class='stChatMessage' data-testid='stChatMessage-assistant'>{assistant_icon_html} <strong>LORAIN:</strong> {data['content']}</div>"
            )
    st.markdown('<div class="chat-history-wrapper">' + "".join(chat_msgs) + '</div>', unsafe_allow_html=True)
-     st.markdown('<div id="chat-top-anchor"></div>', unsafe_allow_html=True)

# --- Edge TTS synth ---
async def edge_tts_synthesize(text, voice, user_id):
    out_path = f"output_{user_id}.mp3"
    communicate = edge_tts.Communicate(text, voice)
    await communicate.save(out_path)
    return out_path

- def synthesize_voice(text,
-     voice =
    out_path = f"output_{user_id}.mp3"
-     if st.session_state["last_tts_text"] !=
-         with st.spinner(f"Generating voice ({
-             asyncio.run(edge_tts_synthesize(
-         st.session_state["last_tts_text"] =
    st.session_state["last_audio_path"] = out_path
-     st.session_state["last_voice"] = voice
    return out_path

- # ---
- with st.container():
-     st.markdown('<div class="input-top-bar">', unsafe_allow_html=True)
-     col1, col2 = st.columns([10, 1])
-     user_input = col1.chat_input("Type your message here...")
-     if col2.button("🗑️", help="Clear Chat", key="clear-chat-top"):
-         clear_chat_history()
-     st.markdown('</div>', unsafe_allow_html=True)
-
- # --- CHAT: display under input, latest on top ---
display_chat_history()

- # ---
- st.markdown("""
-
-
-
-
-
-     window.setTimeout(function(){
-         var anchor = document.getElementById("chat-top-anchor");
-         if(anchor){ anchor.scrollIntoView({ behavior: "smooth", block: "start" }); }
-     }, 200);
-     </script>
- """, unsafe_allow_html=True)

if user_input:
-     # --- OpenAI Assistant Response ---
    thread_id = get_or_create_thread_id()
    client.beta.threads.messages.create(thread_id=thread_id, role="user", content=user_input)
    save_message("user", user_input)
    with st.spinner("Thinking and typing... 💭"):
        run = client.beta.threads.runs.create(thread_id=thread_id, assistant_id=assistant_id)
        while True:
@@ -209,16 +164,15 @@ if user_input:
            if run_status.status == "completed":
                break
            time.sleep(1)
    messages_response = client.beta.threads.messages.list(thread_id=thread_id)
    latest_response = sorted(messages_response.data, key=lambda x: x.created_at)[-1]
    assistant_message = latest_response.content[0].text.value
    save_message("assistant", assistant_message)

-
-     audio_path
-
-     audio_path = synthesize_voice(assistant_message, st.session_state["selected_voice"], user_id)
-     st.session_state["last_audio_path"] = audio_path

    time.sleep(0.2)
    st.rerun()

@@ -3,6 +3,7 @@ import asyncio
import edge_tts
import time
import os
+ import re
import uuid
import firebase_admin
from firebase_admin import credentials, firestore

@@ -19,34 +20,24 @@ openai_key = os.getenv("openai_key")
assistant_id = os.getenv("assistant_id")
client = OpenAI(api_key=openai_key)

+ # ---- Voice Settings ----
+ FIXED_VOICE_NAME = "Jenny (US, Female)"
+ FIXED_VOICE = "en-US-JennyNeural"

# --- State setup
if "user_id" not in st.session_state:
    st.session_state["user_id"] = str(uuid.uuid4())
user_id = st.session_state["user_id"]

if "last_tts_text" not in st.session_state:
    st.session_state["last_tts_text"] = ""
if "last_audio_path" not in st.session_state:
    st.session_state["last_audio_path"] = ""

+ # --- Page config ---
+ st.set_page_config(page_title="LOR Technologies AI Assistant", layout="wide")
+
+ # --- CSS Styling ---
st.markdown("""
<style>
.block-container {padding-top: 1rem;}
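The commit replaces the old VOICE_OPTIONS selector with a single fixed voice, "en-US-JennyNeural". If you want to confirm that short name against what the installed edge-tts package actually exposes, a minimal sketch (not part of this commit; it assumes edge_tts.list_voices() is available in your installed version) is:

# Sketch only: list the edge-tts catalogue and confirm the fixed voice exists.
import asyncio
import edge_tts

async def has_voice(short_name):
    voices = await edge_tts.list_voices()  # list of dicts, each with a "ShortName" key
    return any(v["ShortName"] == short_name for v in voices)

print(asyncio.run(has_voice("en-US-JennyNeural")))  # expected: True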

@@ -64,21 +55,19 @@ st.markdown("""
.stChatMessage[data-testid="stChatMessage-user"] { background: #f0f0f0; color: #000000; }
.stChatMessage[data-testid="stChatMessage-assistant"] { background: #e3f2fd; color: #000000; }
.chat-history-wrapper {
+     margin-top: 0.5em; margin-bottom: 5em; height: 65vh; overflow-y: auto; padding: 0 0.5em;
}
+ .chat-input-bar {
+     position: fixed; bottom: 0; width: 100%; z-index: 100;
+     background: #191b22; padding: 0.6em 1em; border-top: 1px solid #22232c;
+     display: flex; align-items: center; gap: 0.5em;
}
+ .chat-input-bar input { width: 100%; font-size: 1.1em; }
+ .clear-chat-btn { background: none; border: none; font-size: 1.4em; color: #999; cursor: pointer; }
</style>
""", unsafe_allow_html=True)

+ # --- Top Branding ---
st.markdown("""
<div class="lor-brand-bar">
<img src="https://lortechnologies.com/wp-content/uploads/2023/03/LOR-Online-Logo.svg" class="logo-mini" />

@@ -86,30 +75,6 @@ st.markdown("""
</div>
""", unsafe_allow_html=True)

# --- Firestore helpers ---
def get_or_create_thread_id():
    doc_ref = db.collection("users").document(user_id)
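The hunk shows only the first line of get_or_create_thread_id(); the remaining Firestore helpers, including the save_message() and clear_chat_history() that the rest of the diff calls, are unchanged and not displayed. For context, a hedged sketch of the shape such helpers usually take: the "thread_id" field name and the thread-creation call are assumptions, while "role", "content", and "timestamp" match the fields display_chat_history() reads.

# Sketch only: assumed shape of the unchanged Firestore helpers referenced by this diff.
def get_or_create_thread_id():
    doc_ref = db.collection("users").document(user_id)
    doc = doc_ref.get()
    if doc.exists and "thread_id" in doc.to_dict():
        return doc.to_dict()["thread_id"]
    thread = client.beta.threads.create()              # new Assistants API thread (assumed)
    doc_ref.set({"thread_id": thread.id}, merge=True)
    return thread.id

def save_message(role, content):
    db.collection("users").document(user_id).collection("messages").add({
        "role": role,
        "content": content,
        "timestamp": firestore.SERVER_TIMESTAMP,       # lets the history query order by timestamp
    })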

@@ -140,7 +105,7 @@ def display_chat_history():
    messages = db.collection("users").document(user_id).collection("messages").order_by("timestamp").stream()
    assistant_icon_html = "<img src='https://raw.githubusercontent.com/AndrewLORTech/lortechwebsite/main/lorain.jpg' width='22' style='vertical-align:middle; border-radius:50%;'/>"
    chat_msgs = []
+     for msg in list(messages):
        data = msg.to_dict()
        if data["role"] == "user":
            chat_msgs.append(

@@ -151,57 +116,47 @@ def display_chat_history():
                f"<div class='stChatMessage' data-testid='stChatMessage-assistant'>{assistant_icon_html} <strong>LORAIN:</strong> {data['content']}</div>"
            )
    st.markdown('<div class="chat-history-wrapper">' + "".join(chat_msgs) + '</div>', unsafe_allow_html=True)

# --- Edge TTS synth ---
+ def sanitize_tts_text(text):
+     text = re.sub(r'[^\w\s\.\,]', '', text)  # remove emojis & special chars
+     text = text.replace('.co.za', 'dot coza')
+     return text
+
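sanitize_tts_text() keeps only word characters, whitespace, periods, and commas, so emojis, asterisks, and other markdown symbols never reach the TTS engine, and it respells ".co.za" so the domain is read naturally. A quick illustration on a made-up reply:

# Illustration of sanitize_tts_text on a made-up assistant reply.
sample = "Sure! 😊 Visit **lortechnologies.co.za** today."
print(sanitize_tts_text(sample))
# -> "Sure  Visit lortechnologiesdot coza today."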
async def edge_tts_synthesize(text, voice, user_id):
    out_path = f"output_{user_id}.mp3"
    communicate = edge_tts.Communicate(text, voice)
    await communicate.save(out_path)
    return out_path

+ def synthesize_voice(text, user_id):
+     voice = FIXED_VOICE
+     sanitized = sanitize_tts_text(text)
    out_path = f"output_{user_id}.mp3"
+     if st.session_state["last_tts_text"] != sanitized or not os.path.exists(out_path):
+         with st.spinner(f"Generating voice ({FIXED_VOICE_NAME})..."):
+             asyncio.run(edge_tts_synthesize(sanitized, voice, user_id))
+         st.session_state["last_tts_text"] = sanitized
    st.session_state["last_audio_path"] = out_path
    return out_path
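synthesize_voice() now regenerates the per-user MP3 only when the sanitized text changes or the file is missing, so Streamlit reruns replay the cached clip instead of calling Edge TTS again. The underlying call pattern also works outside the app; a standalone sketch (the output file name and text are placeholders):

# Standalone sketch: one-off synthesis with edge-tts, mirroring edge_tts_synthesize above.
import asyncio
import edge_tts

async def demo():
    communicate = edge_tts.Communicate("Hello from LORAIN.", "en-US-JennyNeural")
    await communicate.save("demo_output.mp3")  # writes the MP3 next to the script

asyncio.run(demo())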

+ # --- CHAT: display history ---
display_chat_history()

+ # --- INPUT BAR (floating at bottom) ---
+ st.markdown('<div class="chat-input-bar">', unsafe_allow_html=True)
+ col1, col2 = st.columns([10, 1])
+ user_input = col1.chat_input("Type your message here...")
+ if col2.button("🗑️", help="Clear Chat", key="clear-chat-bottom"):
+     clear_chat_history()
+ st.markdown('</div>', unsafe_allow_html=True)

+ # --- PROCESS USER INPUT ---
if user_input:
    thread_id = get_or_create_thread_id()
    client.beta.threads.messages.create(thread_id=thread_id, role="user", content=user_input)
    save_message("user", user_input)
+
    with st.spinner("Thinking and typing... 💭"):
        run = client.beta.threads.runs.create(thread_id=thread_id, assistant_id=assistant_id)
        while True:

@@ -209,16 +164,15 @@ if user_input:
            if run_status.status == "completed":
                break
            time.sleep(1)
+
    messages_response = client.beta.threads.messages.list(thread_id=thread_id)
    latest_response = sorted(messages_response.data, key=lambda x: x.created_at)[-1]
    assistant_message = latest_response.content[0].text.value
    save_message("assistant", assistant_message)

+     audio_path = synthesize_voice(assistant_message, user_id)
+     if os.path.exists(audio_path):
+         st.audio(audio_path, format="audio/mp3", autoplay=True)

    time.sleep(0.2)
    st.rerun()
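Note that the polling loop still exits only on "completed"; a run that ends as "failed", "cancelled", or "expired" would leave the spinner polling forever. A hedged variant with a terminal-status check and a timeout, reusing the app's client (not part of this commit), could look like:

# Sketch only: poll the Assistants run with a timeout and terminal-status handling.
def wait_for_run(thread_id, run_id, timeout_s=120):
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        run_status = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run_id)
        if run_status.status == "completed":
            return run_status
        if run_status.status in ("failed", "cancelled", "expired"):
            raise RuntimeError(f"Assistant run ended with status: {run_status.status}")
        time.sleep(1)
    raise TimeoutError("Assistant run did not complete in time")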