Update app.py

app.py CHANGED
@@ -55,8 +55,44 @@ st.markdown("""
     .stChatMessage[data-testid="stChatMessage-user"] { background: #f0f0f0; color: #000000; }
     .stChatMessage[data-testid="stChatMessage-assistant"] { background: #e3f2fd; color: #000000; }
     .lt-logo { vertical-align: middle; }
-
-    .
+    /* Floating chat input */
+    .floating-input-bar {
+        position: fixed;
+        bottom: 0;
+        left: 50%;
+        transform: translateX(-50%);
+        width: 60vw;
+        min-width: 300px;
+        max-width: 900px;
+        z-index: 1000;
+        background: #fff;
+        border-radius: 1em 1em 0 0;
+        box-shadow: 0 -2px 16px rgba(0,0,0,0.07);
+        padding: 1em 1em 0.7em 1em;
+        display: flex;
+        align-items: center;
+    }
+    .floating-input-bar .chat-input {
+        flex: 1 1 auto;
+        border-radius: 14px;
+        border: 1px solid #d2d2d2;
+        padding: 0.7em 1em;
+        font-size: 1.1em;
+        margin-right: 0.7em;
+    }
+    .floating-input-bar .clear-btn {
+        border: none;
+        background: transparent;
+        font-size: 1.5em;
+        color: #888;
+        cursor: pointer;
+        margin-left: 0.1em;
+        margin-top: 1px;
+        transition: color 0.1s;
+    }
+    .floating-input-bar .clear-btn:hover { color: #e57373; }
+    /* Push main content up above floating bar */
+    .main-chat-area { margin-bottom: 5em; }
 </style>
 """, unsafe_allow_html=True)
 st.markdown("""
@@ -71,7 +107,11 @@ st.markdown("""
 # --- Sidebar: All audio/controls here ---
 with st.sidebar:
     st.markdown("### Voice Settings & Controls")
-    selected_voice = st.selectbox(
+    selected_voice = st.selectbox(
+        "Select assistant voice",
+        list(VOICE_OPTIONS.keys()),
+        index=list(VOICE_OPTIONS.keys()).index(st.session_state["selected_voice"])
+    )
     st.session_state["selected_voice"] = selected_voice
 
     # Audio player, always present if we have an mp3
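
Note on the selectbox hunk above: rebuilding index= from st.session_state["selected_voice"] on every rerun works, but Streamlit can also own that round trip itself. A minimal alternative sketch, not part of this commit, assuming VOICE_OPTIONS is the dict of voice display names defined earlier in app.py:

    # Sketch: bind the widget to session state with key= instead of index=.
    # Streamlit then keeps st.session_state["selected_voice"] in sync, so the
    # manual write-back on the next line of the diff becomes unnecessary.
    selected_voice = st.selectbox(
        "Select assistant voice",
        list(VOICE_OPTIONS.keys()),
        key="selected_voice",
    )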
@@ -80,7 +120,6 @@ with st.sidebar:
 
     # Replay button and audio player
     if last_audio and os.path.exists(last_audio):
-        # Autoplay if this was just generated, else manual play
         st.audio(last_audio, format="audio/mp3", autoplay=not mute_voice)
         if st.button("🔁 Replay Voice"):
             st.audio(last_audio, format="audio/mp3", autoplay=True)
@@ -113,15 +152,31 @@ def save_message(role, content):
         "timestamp": firestore.SERVER_TIMESTAMP
     })
 
+def clear_chat_history():
+    user_doc_ref = db.collection("users").document(user_id)
+    for msg in user_doc_ref.collection("messages").stream():
+        msg.reference.delete()
+    user_doc_ref.delete()
+    st.session_state.clear()
+    st.rerun()
+
 def display_chat_history():
+    st.markdown("<div class='main-chat-area'>", unsafe_allow_html=True)
     messages = db.collection("users").document(user_id).collection("messages").order_by("timestamp").stream()
     assistant_icon_html = "<img src='https://raw.githubusercontent.com/AndrewLORTech/lortechwebsite/main/lorain.jpg' width='24' style='vertical-align:middle; border-radius:50%;'/>"
     for msg in list(messages)[::-1]:
         data = msg.to_dict()
         if data["role"] == "user":
-            st.markdown(
+            st.markdown(
+                f"<div class='stChatMessage' data-testid='stChatMessage-user'>👤 <strong>You:</strong> {data['content']}</div>",
+                unsafe_allow_html=True,
+            )
         else:
-            st.markdown(
+            st.markdown(
+                f"<div class='stChatMessage' data-testid='stChatMessage-assistant'>{assistant_icon_html} <strong>LORAIN:</strong> {data['content']}</div>",
+                unsafe_allow_html=True,
+            )
+    st.markdown("</div>", unsafe_allow_html=True)
 
 # --- Edge TTS synth ---
 async def edge_tts_synthesize(text, voice, user_id):
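
Note on clear_chat_history() in the hunk above: deleting documents one by one costs a network round trip per message. A hedged sketch of the same cleanup using the google-cloud-firestore batch API (same db and user_id globals as app.py; a Firestore batch is capped at 500 writes, so very long histories would need chunking):

    def clear_chat_history():
        user_doc_ref = db.collection("users").document(user_id)
        batch = db.batch()  # WriteBatch: buffers the deletes locally
        for msg in user_doc_ref.collection("messages").stream():
            batch.delete(msg.reference)
        batch.delete(user_doc_ref)
        batch.commit()  # single round trip (max 500 writes per batch)
        st.session_state.clear()
        st.rerun()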
@@ -146,8 +201,44 @@ def synthesize_voice(text, voice_key, user_id):
 thread_id = get_or_create_thread_id()
 display_chat_history()
 
-# ---
-
+# --- Floating Chat Input Bar & Clear Button ---
+st.markdown("""
+<div class="floating-input-bar">
+  <form action="" method="POST" id="lorain_input_form" style="width:100%;display:flex;">
+    <input id="lorain_user_input" class="chat-input" name="lorain_user_input" type="text" placeholder="Type your message here..." autocomplete="off" autofocus />
+    <button class="clear-btn" type="submit" name="clear_chat" title="Clear chat">🗑</button>
+  </form>
+  <script>
+  // Auto-focus and scroll to bottom on new message
+  let chatbox = document.getElementById("chatbox");
+  if (chatbox) chatbox.scrollTop = chatbox.scrollHeight;
+  </script>
+</div>
+""", unsafe_allow_html=True)
+
+# --- Handle Chat Input/Clear POST ---
+import streamlit.web.server.websocket_headers
+from urllib.parse import parse_qs
+
+# Use query params to emulate form submission state (for Streamlit stateless-ness)
+query_params = st.experimental_get_query_params()
+import streamlit.runtime.scriptrunner.script_run_context as sctx
+ctx = sctx.get_script_run_ctx()
+form_data = {}
+if ctx and hasattr(ctx, 'request'):
+    content_length = ctx.request.headers.get('content-length')
+    if content_length:
+        body = ctx.request.rfile.read(int(content_length)).decode()
+        form_data = parse_qs(body)
+
+# Detect clear chat
+if form_data.get("clear_chat"):
+    clear_chat_history()
+
+# Detect user input
+user_input = ""
+if form_data.get("lorain_user_input"):
+    user_input = form_data.get("lorain_user_input")[0].strip()
 
 if user_input:
     # --- OpenAI Assistant Response ---
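
A caveat on the final hunk: released Streamlit versions do not expose an HTTP request object on get_script_run_ctx(), so the hasattr(ctx, 'request') guard leaves form_data empty and the raw HTML form never reaches the script; st.experimental_get_query_params is likewise deprecated in favor of st.query_params. A minimal sketch of the same input-plus-clear flow on public APIs (assumes clear_chat_history() from the earlier hunk; st.chat_input ships with Streamlit >= 1.24):

    # Sketch: supported replacements for the raw POST handling above.
    user_input = st.chat_input("Type your message here...")  # renders its own bottom-pinned bar

    with st.sidebar:
        if st.button("🗑 Clear chat"):
            clear_chat_history()  # deletes the Firestore history and reruns

    # st.chat_input returns None until the user submits, so the existing
    # `if user_input:` block below works unchanged.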