IAMTFRMZA committed on
Commit e76a3ee · verified · 1 Parent(s): 5ebf867

Update app.py

Files changed (1):
  1. app.py +38 -40
app.py CHANGED
@@ -8,18 +8,17 @@ import firebase_admin
 from firebase_admin import credentials, firestore
 from openai import OpenAI
 
-# ---- Firebase setup ----
+# Firebase setup
 if not firebase_admin._apps:
     cred = credentials.Certificate("firebase-service-account.json")
     firebase_admin.initialize_app(cred)
 db = firestore.client()
 
-# ---- OpenAI setup ----
+# OpenAI setup
 openai_key = os.getenv("openai_key")
 assistant_id = os.getenv("assistant_id")
 client = OpenAI(api_key=openai_key)
 
-# ---- Edge TTS voices ----
 VOICE_OPTIONS = {
     "Jenny (US, Female)": "en-US-JennyNeural",
     "Aria (US, Female)": "en-US-AriaNeural",
@@ -33,6 +32,7 @@ VOICE_OPTIONS = {
 
 st.set_page_config(page_title="LOR Technologies AI Assistant", layout="wide")
 
+# State
 if "user_id" not in st.session_state:
     st.session_state["user_id"] = str(uuid.uuid4())
 user_id = st.session_state["user_id"]
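The repeated `if key not in st.session_state` guards could also be collapsed, since `st.session_state` is dict-like; a sketch, not part of the commit:

# Equivalent initialisation via setdefault (st.session_state implements the
# MutableMapping interface, so setdefault is available). Sketch only.
import uuid
import streamlit as st

st.session_state.setdefault("user_id", str(uuid.uuid4()))
st.session_state.setdefault("selected_voice", "Jenny (US, Female)")
user_id = st.session_state["user_id"]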
@@ -46,18 +46,26 @@ if "last_audio_path" not in st.session_state:
 if "selected_voice" not in st.session_state:
     st.session_state["selected_voice"] = "Jenny (US, Female)"
 
-# --- Branding & Styling ---
+# CSS for floating bar illusion (margin for chat, input pinned visually)
 st.markdown("""
     <style>
-    .block-container {padding-top: 1rem; padding-bottom: 0rem;}
-    header {visibility: hidden;}
+    .block-container {padding-top: 1rem;}
     .stChatMessage { max-width: 85%; border-radius: 12px; padding: 8px; margin-bottom: 10px; }
     .stChatMessage[data-testid="stChatMessage-user"] { background: #f0f0f0; color: #000000; }
     .stChatMessage[data-testid="stChatMessage-assistant"] { background: #e3f2fd; color: #000000; }
     .lt-logo { vertical-align: middle; }
-    .fixed-bottom-bar { position: fixed; bottom: 0; left: 0; width: 100vw; background: #181a22; padding: 1.2em 0.5em 0.8em 0.5em; z-index: 9999; box-shadow: 0 -2px 8px rgba(0,0,0,0.06);}
-    .chat-area { margin-bottom: 6.1em; }
-    .st-emotion-cache-1avcm0n { justify-content: flex-end !important; }
+    .footer-fakebar {
+        position: fixed;
+        left: 0; bottom: 0;
+        width: 100vw;
+        background: #181a22;
+        box-shadow: 0 -2px 8px rgba(0,0,0,0.10);
+        padding: 1.2em 0.5em 0.8em 0.5em;
+        z-index: 9999;
+    }
+    .footer-fakebar .element-container { flex: 1 1 auto; }
+    .footer-fakebar input { font-size: 1.15em !important; }
+    .footer-placeholder { height: 90px; }
     </style>
 """, unsafe_allow_html=True)
 st.markdown("""
@@ -69,27 +77,21 @@ st.markdown("""
     </div>
 """, unsafe_allow_html=True)
 
-# --- Sidebar: All audio/controls here ---
+# Sidebar: audio/voice controls
 with st.sidebar:
     st.markdown("### Voice Settings & Controls")
     selected_voice = st.selectbox(
-        "Select assistant voice",
-        list(VOICE_OPTIONS.keys()),
+        "Select assistant voice", list(VOICE_OPTIONS.keys()),
         index=list(VOICE_OPTIONS.keys()).index(st.session_state["selected_voice"])
     )
     st.session_state["selected_voice"] = selected_voice
 
-    # Audio player, always present if we have an mp3
     last_audio = st.session_state.get("last_audio_path")
     mute_voice = st.session_state.get("mute_voice", False)
-
-    # Replay button and audio player
     if last_audio and os.path.exists(last_audio):
         st.audio(last_audio, format="audio/mp3", autoplay=not mute_voice)
         if st.button("🔁 Replay Voice"):
             st.audio(last_audio, format="audio/mp3", autoplay=True)
-
-    # Mute/Unmute
     if not mute_voice:
         if st.button("🔇 Mute Voice"):
             st.session_state["mute_voice"] = True
@@ -99,7 +101,6 @@ with st.sidebar:
             st.session_state["mute_voice"] = False
             st.rerun()
 
-# --- Firestore helpers ---
 def get_or_create_thread_id():
     doc_ref = db.collection("users").document(user_id)
     doc = doc_ref.get()
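The hunk cuts off before the body of get_or_create_thread_id(); the usual shape of such a helper, purely as a hedged sketch (the "thread_id" field and the thread-creation call are assumptions, not shown in this diff):

# Hedged sketch of a Firestore get-or-create helper in the shape the diff
# suggests; reuses the db, user_id, and client defined above. The "thread_id"
# field and client.beta.threads.create() are assumptions.
def get_or_create_thread_id_sketch():
    doc_ref = db.collection("users").document(user_id)
    doc = doc_ref.get()
    if doc.exists and doc.to_dict().get("thread_id"):
        return doc.to_dict()["thread_id"]
    thread = client.beta.threads.create()              # new Assistants thread
    doc_ref.set({"thread_id": thread.id}, merge=True)  # persist for this user
    return thread.id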
@@ -126,24 +127,22 @@ def clear_chat_history():
     st.rerun()
 
 def display_chat_history():
-    st.markdown("<div class='chat-area'>", unsafe_allow_html=True)
     messages = db.collection("users").document(user_id).collection("messages").order_by("timestamp").stream()
     assistant_icon_html = "<img src='https://raw.githubusercontent.com/AndrewLORTech/lortechwebsite/main/lorain.jpg' width='24' style='vertical-align:middle; border-radius:50%;'/>"
+    chat_msgs = []
     for msg in list(messages)[::-1]:
         data = msg.to_dict()
         if data["role"] == "user":
-            st.markdown(
-                f"<div class='stChatMessage' data-testid='stChatMessage-user'>👤 <strong>You:</strong> {data['content']}</div>",
-                unsafe_allow_html=True,
+            chat_msgs.append(
+                f"<div class='stChatMessage' data-testid='stChatMessage-user'>👤 <strong>You:</strong> {data['content']}</div>"
             )
         else:
-            st.markdown(
-                f"<div class='stChatMessage' data-testid='stChatMessage-assistant'>{assistant_icon_html} <strong>LORAIN:</strong> {data['content']}</div>",
-                unsafe_allow_html=True,
+            chat_msgs.append(
+                f"<div class='stChatMessage' data-testid='stChatMessage-assistant'>{assistant_icon_html} <strong>LORAIN:</strong> {data['content']}</div>"
             )
-    st.markdown("</div>", unsafe_allow_html=True)
+    st.markdown("".join(chat_msgs[::-1]), unsafe_allow_html=True)
 
-# --- Edge TTS synth ---
+# TTS
 async def edge_tts_synthesize(text, voice, user_id):
     out_path = f"output_{user_id}.mp3"
     communicate = edge_tts.Communicate(text, voice)
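The rewritten display_chat_history() reverses twice: the Firestore stream arrives oldest-first, the loop walks it newest-first, and the final join flips it back to chronological order. A tiny illustration of why the two [::-1] cancel out:

# The two reversals cancel: output order equals the original query order.
msgs = ["m1", "m2", "m3"]   # oldest -> newest, as order_by("timestamp") returns
walked = msgs[::-1]         # ["m3", "m2", "m1"], the loop's iteration order
rendered = walked[::-1]     # ["m1", "m2", "m3"], what st.markdown receives
assert rendered == msgs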
@@ -153,7 +152,6 @@ async def edge_tts_synthesize(text, voice, user_id):
 def synthesize_voice(text, voice_key, user_id):
     voice = VOICE_OPTIONS[voice_key]
     out_path = f"output_{user_id}.mp3"
-    # Only synthesize if text changed or file missing or voice changed
     if st.session_state["last_tts_text"] != text or not os.path.exists(out_path) or st.session_state.get("last_voice") != voice:
         with st.spinner(f"Generating voice ({voice_key})..."):
             asyncio.run(edge_tts_synthesize(text, voice, user_id))
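For reference, the edge-tts call pattern wrapped by edge_tts_synthesize() also works standalone; Communicate(text, voice) and its save() coroutine are the library's documented entry points:

# Standalone edge-tts usage matching the pattern above (pip install edge-tts).
import asyncio
import edge_tts

async def demo() -> None:
    communicate = edge_tts.Communicate("Hello from LORAIN.", "en-US-JennyNeural")
    await communicate.save("demo.mp3")  # writes the synthesized speech to disk

asyncio.run(demo())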
@@ -162,40 +160,40 @@ def synthesize_voice(text, voice_key, user_id):
     st.session_state["last_voice"] = voice
     return out_path
 
-# --- Main Chat UI (text only!) ---
-thread_id = get_or_create_thread_id()
+# --- Chat history and spacer ---
 display_chat_history()
+st.markdown('<div class="footer-placeholder"></div>', unsafe_allow_html=True)
 
-# --- Fixed bottom bar: input + clear chat ---
+# --- "Floating" Chat Input and Clear Chat button ---
 with st.container():
-    cols = st.columns([10, 1])
-    user_input = cols[0].chat_input("Type your message here...")
-    if cols[1].button("🗑️", help="Clear Chat"):
+    st.markdown('<div class="footer-fakebar">', unsafe_allow_html=True)
+    col1, col2 = st.columns([10, 1])
+    user_input = col1.chat_input("Type your message here...")
+    if col2.button("🗑️", help="Clear Chat", key="clear-chat-bottom"):
         clear_chat_history()
+    st.markdown('</div>', unsafe_allow_html=True)
 
 if user_input:
     # --- OpenAI Assistant Response ---
-    client.beta.threads.messages.create(thread_id=thread_id, role="user", content=user_input)
+    client.beta.threads.messages.create(thread_id=get_or_create_thread_id(), role="user", content=user_input)
     save_message("user", user_input)
     with st.spinner("Thinking and typing... 💭"):
-        run = client.beta.threads.runs.create(thread_id=thread_id, assistant_id=assistant_id)
+        run = client.beta.threads.runs.create(thread_id=get_or_create_thread_id(), assistant_id=assistant_id)
         while True:
-            run_status = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run.id)
+            run_status = client.beta.threads.runs.retrieve(thread_id=get_or_create_thread_id(), run_id=run.id)
             if run_status.status == "completed":
                 break
             time.sleep(1)
-    messages_response = client.beta.threads.messages.list(thread_id=thread_id)
+    messages_response = client.beta.threads.messages.list(thread_id=get_or_create_thread_id())
     latest_response = sorted(messages_response.data, key=lambda x: x.created_at)[-1]
     assistant_message = latest_response.content[0].text.value
     save_message("assistant", assistant_message)
 
-    # --- TTS: Speak unless muted ---
     mute_voice = st.session_state.get("mute_voice", False)
     audio_path = None
     if not mute_voice and assistant_message.strip():
         audio_path = synthesize_voice(assistant_message, st.session_state["selected_voice"], user_id)
         st.session_state["last_audio_path"] = audio_path
-    # No audio in main area! Sidebar will autoplay due to above code.
 
     time.sleep(0.2)
     st.rerun()
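One caveat on the last hunk: the `while True` poll only exits on status == "completed", so a run that ends failed, cancelled, or expired would spin forever. A hedged variant with terminal states and a timeout (the status strings follow the Assistants API run states, but treat the exact set as an assumption):

# Polling with terminal-state and timeout handling; same beta client as the
# diff. TERMINAL is an assumption about which run states are final, and
# "requires_action" runs would additionally need tool-output handling.
import time

TERMINAL = {"completed", "failed", "cancelled", "expired"}

def wait_for_run(client, thread_id, run_id, timeout_s=120.0, poll_s=1.0):
    deadline = time.monotonic() + timeout_s
    while time.monotonic() < deadline:
        run = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run_id)
        if run.status in TERMINAL:
            return run
        time.sleep(poll_s)
    raise TimeoutError(f"run {run_id} did not finish within {timeout_s}s")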