IAMTFRMZA committed (verified)
Commit 1291105 · Parent(s): 6f9c618

Update app.py

Files changed (1): app.py (+40 −79)
app.py CHANGED
@@ -21,7 +21,13 @@ client = OpenAI(api_key=openai_key)
 
 VOICE_OPTIONS = {
     "Jenny (US, Female)": "en-US-JennyNeural",
-    "Leah (SA, Female)": "en-ZA-LeahNeural"
+    "Aria (US, Female)": "en-US-AriaNeural",
+    "Ryan (UK, Male)": "en-GB-RyanNeural",
+    "Natasha (AU, Female)": "en-AU-NatashaNeural",
+    "William (AU, Male)": "en-AU-WilliamNeural",
+    "Libby (UK, Female)": "en-GB-LibbyNeural",
+    "Leah (SA, Female)": "en-ZA-LeahNeural",
+    "Luke (SA, Male)": "en-ZA-LukeNeural"
 }
 
 st.set_page_config(page_title="LOR Technologies AI Assistant", layout="wide")
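The hunk above widens VOICE_OPTIONS from a single extra voice to eight Azure neural voices. A minimal sketch, not part of the commit, for checking that each configured ShortName is actually offered by the edge-tts service; the trimmed VOICE_OPTIONS dict here is illustrative only:

```python
# Hedged sketch: verify configured edge-tts ShortNames against the live voice list.
import asyncio
import edge_tts

VOICE_OPTIONS = {  # illustrative subset of the dict in app.py
    "Jenny (US, Female)": "en-US-JennyNeural",
    "Leah (SA, Female)": "en-ZA-LeahNeural",
    "Luke (SA, Male)": "en-ZA-LukeNeural",
}

async def missing_voices(options):
    # edge_tts.list_voices() returns dicts that include a "ShortName" key.
    available = {v["ShortName"] for v in await edge_tts.list_voices()}
    return [short for short in options.values() if short not in available]

if __name__ == "__main__":
    print(asyncio.run(missing_voices(VOICE_OPTIONS)) or "all voices available")
```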
@@ -39,10 +45,8 @@ if "last_audio_path" not in st.session_state:
     st.session_state["last_audio_path"] = ""
 if "selected_voice" not in st.session_state:
     st.session_state["selected_voice"] = "Jenny (US, Female)"
-if "is_thinking" not in st.session_state:
-    st.session_state["is_thinking"] = False
 
-# --- CSS ---
+# --- CSS for logo, chat, and input bar at top ---
 st.markdown("""
 <style>
 .block-container {padding-top: 1rem;}
@@ -59,54 +63,22 @@ st.markdown("""
 .stChatMessage { max-width: 85%; border-radius: 12px; padding: 8px; margin-bottom: 10px; }
 .stChatMessage[data-testid="stChatMessage-user"] { background: #f0f0f0; color: #000000; }
 .stChatMessage[data-testid="stChatMessage-assistant"] { background: #e3f2fd; color: #000000; }
-.chat-body {
-    display: flex;
-    flex-direction: column;
-    justify-content: flex-end;
-    height: calc(100vh - 230px);
-    margin-bottom: 6em;
-}
 .chat-history-wrapper {
-    flex-grow: 1;
-    overflow-y: auto;
-    padding: 0 0.5em;
-}
-.lorain-thinking {
-    text-align: center;
-    color: #ddd;
-    font-size: 14px;
-    margin: 0.5em 0;
-}
-.chat-input-bar {
-    position: fixed;
-    bottom: 0;
-    left: 0;
-    right: 0;
-    z-index: 100;
-    background: #191b22;
-    padding: 0.6em 1em;
-    border-top: 1px solid #22232c;
-    display: flex;
-    align-items: center;
-    gap: 0.5em;
-}
-.chat-input-bar input {
-    flex-grow: 1;
-    font-size: 1.1em;
-    min-width: 10em;
+    margin-top: 0.5em; padding-bottom: 2em; min-height: 60vh;
 }
-.clear-chat-btn {
-    flex: 0 0 auto;
-    background: none;
-    border: none;
-    font-size: 1.4em;
-    color: #999;
-    cursor: pointer;
+.input-top-bar {
+    position: sticky; top: 0; z-index: 20;
+    background: #191b22; padding: 0.5em 0 0.25em 0;
+    display: flex; align-items: center; gap: 0.4em;
+    border-bottom: 1px solid #22232c;
 }
+.input-top-bar .element-container { flex: 1 1 auto; }
+.input-top-bar input { font-size: 1.12em !important; }
+.clear-chat-btn { background: none; border: none; font-size: 1.4em; color: #666; cursor: pointer; }
 </style>
 """, unsafe_allow_html=True)
 
-# --- Branding ---
+# --- Top Branding, Mini Logo ---
 st.markdown("""
 <div class="lor-brand-bar">
     <img src="https://lortechnologies.com/wp-content/uploads/2023/03/LOR-Online-Logo.svg" class="logo-mini" />
@@ -168,7 +140,7 @@ def display_chat_history():
     messages = db.collection("users").document(user_id).collection("messages").order_by("timestamp").stream()
     assistant_icon_html = "<img src='https://raw.githubusercontent.com/AndrewLORTech/lortechwebsite/main/lorain.jpg' width='22' style='vertical-align:middle; border-radius:50%;'/>"
     chat_msgs = []
-    for msg in list(messages):
+    for msg in list(messages)[::-1]:  # Most recent first!
         data = msg.to_dict()
         if data["role"] == "user":
             chat_msgs.append(
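Reversing with list(messages)[::-1] puts the newest message first after fetching everything in ascending order. A hedged alternative, assuming db and user_id are the same Firestore client and document id used in app.py, is to ask Firestore for descending order directly:

```python
# Hedged alternative to reversing in Python: order by timestamp descending server-side.
from google.cloud import firestore  # firebase_admin's firestore.client() returns this Client type

def fetch_messages_newest_first(db, user_id):
    return (
        db.collection("users")
        .document(user_id)
        .collection("messages")
        .order_by("timestamp", direction=firestore.Query.DESCENDING)
        .stream()
    )
```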
@@ -179,8 +151,9 @@ def display_chat_history():
             f"<div class='stChatMessage' data-testid='stChatMessage-assistant'>{assistant_icon_html} <strong>LORAIN:</strong> {data['content']}</div>"
         )
     st.markdown('<div class="chat-history-wrapper">' + "".join(chat_msgs) + '</div>', unsafe_allow_html=True)
+    st.markdown('<div id="chat-top-anchor"></div>', unsafe_allow_html=True)
 
-# --- Edge TTS ---
+# --- Edge TTS synth ---
 async def edge_tts_synthesize(text, voice, user_id):
     out_path = f"output_{user_id}.mp3"
     communicate = edge_tts.Communicate(text, voice)
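edge_tts_synthesize is a coroutine, while the rest of app.py runs as synchronous Streamlit code. A minimal sketch of how synthesize_voice presumably drives it; the coroutine body mirrors the hunk above, and the asyncio.run wrapper plus type hints are assumptions:

```python
# Hedged sketch: drive the async edge-tts call from synchronous Streamlit code.
import asyncio
import edge_tts

async def edge_tts_synthesize(text: str, voice: str, user_id: str) -> str:
    out_path = f"output_{user_id}.mp3"
    communicate = edge_tts.Communicate(text, voice)
    await communicate.save(out_path)  # writes the MP3 to disk
    return out_path

def synthesize_blocking(text: str, voice: str, user_id: str) -> str:
    # asyncio.run gives the coroutine its own event loop for the duration of the call,
    # which is sufficient inside a single Streamlit script run.
    return asyncio.run(edge_tts_synthesize(text, voice, user_id))
```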
@@ -198,47 +171,37 @@ def synthesize_voice(text, voice_key, user_id):
     st.session_state["last_voice"] = voice
     return out_path
 
-# --- Chat BODY ---
-st.markdown('<div class="chat-body">', unsafe_allow_html=True)
-display_chat_history()
-
-if st.session_state.get("is_thinking", False):
-    st.markdown('<div class="lorain-thinking">🤖 <em>LORAIN is thinking...</em></div>', unsafe_allow_html=True)
-
-st.markdown('</div>', unsafe_allow_html=True)
-
-# --- Input bar (Streamlit FORM) ---
-with st.form("chat_form", clear_on_submit=True):
-    st.markdown('<div class="chat-input-bar">', unsafe_allow_html=True)
-    user_input = st.text_input("Type your message here:", label_visibility="collapsed")
-    submit_button = st.form_submit_button("Send")
-    clear_button = st.button("🗑️", help="Clear Chat", key="clear-chat-bottom")
+# --- INPUT AT THE TOP ---
+with st.container():
+    st.markdown('<div class="input-top-bar">', unsafe_allow_html=True)
+    col1, col2 = st.columns([10, 1])
+    user_input = col1.chat_input("Type your message here...")
+    if col2.button("🗑️", help="Clear Chat", key="clear-chat-top"):
+        clear_chat_history()
     st.markdown('</div>', unsafe_allow_html=True)
 
-if clear_button:
-    clear_chat_history()
+# --- CHAT: display under input, latest on top ---
+display_chat_history()
 
-# --- Auto-scroll JS ---
+# --- JS: auto-scroll to top on new message ---
 st.markdown("""
 <script>
     window.onload = function() {
-        var chatWrapper = document.querySelector('.chat-history-wrapper');
-        if(chatWrapper){ chatWrapper.scrollTop = chatWrapper.scrollHeight; }
+        var anchor = document.getElementById("chat-top-anchor");
+        if(anchor){ anchor.scrollIntoView({ behavior: "smooth", block: "start" }); }
     };
-    setTimeout(function(){
-        var chatWrapper = document.querySelector('.chat-history-wrapper');
-        if(chatWrapper){ chatWrapper.scrollTop = chatWrapper.scrollHeight; }
-    }, 300);
+    window.setTimeout(function(){
+        var anchor = document.getElementById("chat-top-anchor");
+        if(anchor){ anchor.scrollIntoView({ behavior: "smooth", block: "start" }); }
+    }, 200);
 </script>
 """, unsafe_allow_html=True)
 
-# --- Handle user input ---
-if submit_button and user_input:
-    st.session_state["is_thinking"] = True
+if user_input:
+    # --- OpenAI Assistant Response ---
     thread_id = get_or_create_thread_id()
     client.beta.threads.messages.create(thread_id=thread_id, role="user", content=user_input)
     save_message("user", user_input)
-
     with st.spinner("Thinking and typing... 💭"):
         run = client.beta.threads.runs.create(thread_id=thread_id, assistant_id=assistant_id)
         while True:
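The commit keeps the poll-until-completed loop on the Assistants run. A hedged sketch of the same loop with a terminal-status check and a timeout, so a failed run cannot spin forever; client, thread_id, and assistant_id are the objects already used in app.py, and the 120-second timeout is an assumption:

```python
# Hedged sketch: poll the Assistants run, but bail out on failure or timeout.
import time

def wait_for_run(client, thread_id: str, assistant_id: str, timeout_s: float = 120.0):
    run = client.beta.threads.runs.create(thread_id=thread_id, assistant_id=assistant_id)
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        run_status = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run.id)
        if run_status.status == "completed":
            return run_status
        if run_status.status in ("failed", "cancelled", "expired"):
            raise RuntimeError(f"Assistant run ended with status {run_status.status}")
        time.sleep(1)
    raise TimeoutError("Assistant run did not complete in time")
```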
@@ -246,7 +209,6 @@ if submit_button and user_input:
             if run_status.status == "completed":
                 break
             time.sleep(1)
-
         messages_response = client.beta.threads.messages.list(thread_id=thread_id)
         latest_response = sorted(messages_response.data, key=lambda x: x.created_at)[-1]
         assistant_message = latest_response.content[0].text.value
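Sorting every thread message by created_at and taking the last entry assumes the assistant's reply is always the newest item. A slightly safer variant, not in the commit, filters by role first:

```python
# Hedged variant: pick the newest assistant-authored message explicitly.
def latest_assistant_text(client, thread_id: str) -> str:
    messages_response = client.beta.threads.messages.list(thread_id=thread_id)
    assistant_msgs = [m for m in messages_response.data if m.role == "assistant"]
    latest = sorted(assistant_msgs, key=lambda m: m.created_at)[-1]
    return latest.content[0].text.value
```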
@@ -258,6 +220,5 @@ if submit_button and user_input:
         audio_path = synthesize_voice(assistant_message, st.session_state["selected_voice"], user_id)
         st.session_state["last_audio_path"] = audio_path
 
-    st.session_state["is_thinking"] = False
     time.sleep(0.2)
-    st.rerun()
+    st.rerun()
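This hunk drops the is_thinking flag and keeps the final st.rerun() after storing last_audio_path. A minimal sketch, assuming playback happens elsewhere in app.py via the same session-state key, of how that stored file can be played on the next script run:

```python
# Hypothetical playback snippet; app.py's actual audio player is not shown in this diff.
import os
import streamlit as st

audio_path = st.session_state.get("last_audio_path", "")
if audio_path and os.path.exists(audio_path):
    st.audio(audio_path, format="audio/mp3")  # plays the synthesized reply
```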
 