IAMTFRMZA committed on
Commit
ba79a7e
·
verified ·
1 Parent(s): c6aeced

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +34 -60
app.py CHANGED
@@ -31,10 +31,8 @@ VOICE_OPTIONS = {
31
  "Luke (SA, Male)": "en-ZA-LukeNeural"
32
  }
33
 
34
- # --- Streamlit Config ---
35
  st.set_page_config(page_title="LOR Technologies AI Assistant", layout="wide")
36
 
37
- # --- User/session state ---
38
  if "user_id" not in st.session_state:
39
  st.session_state["user_id"] = str(uuid.uuid4())
40
  user_id = st.session_state["user_id"]
@@ -48,12 +46,6 @@ if "last_audio_path" not in st.session_state:
48
  if "selected_voice" not in st.session_state:
49
  st.session_state["selected_voice"] = "Jenny (US, Female)"
50
 
51
- # --- Sidebar for Voice Selection ---
52
- with st.sidebar:
53
- st.markdown("### Voice Settings")
54
- selected_voice = st.selectbox("Select assistant voice", list(VOICE_OPTIONS.keys()), index=list(VOICE_OPTIONS.keys()).index(st.session_state["selected_voice"]))
55
- st.session_state["selected_voice"] = selected_voice
56
-
57
  # --- Branding & Styling ---
58
  st.markdown("""
59
  <style>
@@ -63,6 +55,8 @@ st.markdown("""
63
  .stChatMessage[data-testid="stChatMessage-user"] { background: #f0f0f0; color: #000000; }
64
  .stChatMessage[data-testid="stChatMessage-assistant"] { background: #e3f2fd; color: #000000; }
65
  .lt-logo { vertical-align: middle; }
 
 
66
  </style>
67
  """, unsafe_allow_html=True)
68
  st.markdown("""
@@ -74,6 +68,33 @@ st.markdown("""
74
  </div>
75
  """, unsafe_allow_html=True)
76
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
77
  # --- Firestore helpers ---
78
  def get_or_create_thread_id():
79
  doc_ref = db.collection("users").document(user_id)
@@ -121,26 +142,13 @@ def synthesize_voice(text, voice_key, user_id):
121
  st.session_state["last_voice"] = voice
122
  return out_path
123
 
124
- # --- Main Chat UI ---
125
- input_col, clear_col = st.columns([9, 1])
126
- with input_col:
127
- user_input = st.chat_input("Type your message here...")
128
-
129
- with clear_col:
130
- if st.button("🗑️", key="clear-chat", help="Clear Chat"):
131
- try:
132
- user_doc_ref = db.collection("users").document(user_id)
133
- for msg in user_doc_ref.collection("messages").stream():
134
- msg.reference.delete()
135
- user_doc_ref.delete()
136
- st.session_state.clear()
137
- st.rerun()
138
- except Exception as e:
139
- st.error(f"Failed to clear chat: {e}")
140
-
141
  thread_id = get_or_create_thread_id()
142
  display_chat_history()
143
 
 
 
 
144
  if user_input:
145
  # --- OpenAI Assistant Response ---
146
  client.beta.threads.messages.create(thread_id=thread_id, role="user", content=user_input)
@@ -163,41 +171,7 @@ if user_input:
163
  if not mute_voice and assistant_message.strip():
164
  audio_path = synthesize_voice(assistant_message, st.session_state["selected_voice"], user_id)
165
  st.session_state["last_audio_path"] = audio_path
166
- st.audio(audio_path, format="audio/mp3", autoplay=True)
167
- elif mute_voice:
168
- st.info("🔇 Voice is muted. Click Unmute below to enable assistant speech.")
169
-
170
- # --- Controls (Mute/Unmute/Replay) ---
171
- col1, col2 = st.columns([1, 1])
172
- with col1:
173
- if not mute_voice and st.button("🔇 Mute Voice"):
174
- st.session_state["mute_voice"] = True
175
- st.rerun()
176
- elif mute_voice and st.button("🔊 Unmute Voice"):
177
- st.session_state["mute_voice"] = False
178
- st.rerun()
179
- with col2:
180
- # Replay button: Always available if last_audio_path exists
181
- if st.session_state.get("last_audio_path") and os.path.exists(st.session_state["last_audio_path"]):
182
- if st.button("🔁 Replay Voice"):
183
- st.audio(st.session_state["last_audio_path"], format="audio/mp3", autoplay=True)
184
 
185
  time.sleep(0.2)
186
  st.rerun()
187
- else:
188
- # Always show last audio with replay if available
189
- if st.session_state.get("last_audio_path") and os.path.exists(st.session_state["last_audio_path"]) and not st.session_state["mute_voice"]:
190
- st.audio(st.session_state["last_audio_path"], format="audio/mp3", autoplay=False)
191
- # Controls: Only show Replay when idle
192
- if st.button("🔁 Replay Last Voice"):
193
- st.audio(st.session_state["last_audio_path"], format="audio/mp3", autoplay=True)
194
-
195
- # Show mute/unmute in idle state too
196
- if not st.session_state["mute_voice"]:
197
- if st.button("🔇 Mute Voice"):
198
- st.session_state["mute_voice"] = True
199
- st.rerun()
200
- else:
201
- if st.button("🔊 Unmute Voice"):
202
- st.session_state["mute_voice"] = False
203
- st.rerun()
 
31
  "Luke (SA, Male)": "en-ZA-LukeNeural"
32
  }
33
 
 
34
  st.set_page_config(page_title="LOR Technologies AI Assistant", layout="wide")
35
 
 
36
  if "user_id" not in st.session_state:
37
  st.session_state["user_id"] = str(uuid.uuid4())
38
  user_id = st.session_state["user_id"]
 
46
  if "selected_voice" not in st.session_state:
47
  st.session_state["selected_voice"] = "Jenny (US, Female)"
48
 
 
 
 
 
 
 
49
  # --- Branding & Styling ---
50
  st.markdown("""
51
  <style>
 
55
  .stChatMessage[data-testid="stChatMessage-user"] { background: #f0f0f0; color: #000000; }
56
  .stChatMessage[data-testid="stChatMessage-assistant"] { background: #e3f2fd; color: #000000; }
57
  .lt-logo { vertical-align: middle; }
58
+ .st-emotion-cache-1avcm0n { justify-content: flex-end !important; }
59
+ .stChatInputContainer { position: fixed !important; bottom: 0; width: 80vw; z-index: 100; left: 10vw; background: #11131a; }
60
  </style>
61
  """, unsafe_allow_html=True)
62
  st.markdown("""
 
68
  </div>
69
  """, unsafe_allow_html=True)
70
 
71
+ # --- Sidebar: All audio/controls here ---
72
+ with st.sidebar:
73
+ st.markdown("### Voice Settings & Controls")
74
+ selected_voice = st.selectbox("Select assistant voice", list(VOICE_OPTIONS.keys()), index=list(VOICE_OPTIONS.keys()).index(st.session_state["selected_voice"]))
75
+ st.session_state["selected_voice"] = selected_voice
76
+
77
+ # Audio player, always present if we have an mp3
78
+ last_audio = st.session_state.get("last_audio_path")
79
+ mute_voice = st.session_state.get("mute_voice", False)
80
+
81
+ # Replay button and audio player
82
+ if last_audio and os.path.exists(last_audio):
83
+ # Autoplay if this was just generated, else manual play
84
+ st.audio(last_audio, format="audio/mp3", autoplay=not mute_voice)
85
+ if st.button("🔁 Replay Voice"):
86
+ st.audio(last_audio, format="audio/mp3", autoplay=True)
87
+
88
+ # Mute/Unmute
89
+ if not mute_voice:
90
+ if st.button("🔇 Mute Voice"):
91
+ st.session_state["mute_voice"] = True
92
+ st.rerun()
93
+ else:
94
+ if st.button("🔊 Unmute Voice"):
95
+ st.session_state["mute_voice"] = False
96
+ st.rerun()
97
+
98
  # --- Firestore helpers ---
99
  def get_or_create_thread_id():
100
  doc_ref = db.collection("users").document(user_id)
 
142
  st.session_state["last_voice"] = voice
143
  return out_path
144
 
145
+ # --- Main Chat UI (text only!) ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
146
  thread_id = get_or_create_thread_id()
147
  display_chat_history()
148
 
149
+ # --- Static Chat Input at Bottom ---
150
+ user_input = st.chat_input("Type your message here...")
151
+
152
  if user_input:
153
  # --- OpenAI Assistant Response ---
154
  client.beta.threads.messages.create(thread_id=thread_id, role="user", content=user_input)
 
171
  if not mute_voice and assistant_message.strip():
172
  audio_path = synthesize_voice(assistant_message, st.session_state["selected_voice"], user_id)
173
  st.session_state["last_audio_path"] = audio_path
174
+ # No audio in main area! Sidebar will autoplay due to above code.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
175
 
176
  time.sleep(0.2)
177
  st.rerun()