import streamlit as st
import asyncio
import edge_tts
import time
import os
import uuid
import firebase_admin
from firebase_admin import credentials, firestore
from openai import OpenAI
# ---- Firebase setup ----
if not firebase_admin._apps:
    cred = credentials.Certificate("firebase-service-account.json")
    firebase_admin.initialize_app(cred)
db = firestore.client()
# ---- OpenAI setup ----
openai_key = os.getenv("openai_key")
assistant_id = os.getenv("assistant_id")
client = OpenAI(api_key=openai_key)
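# Credentials come from environment variables (e.g. Hugging Face Space secrets
# named "openai_key" and "assistant_id"). If openai_key is unset, the OpenAI
# client falls back to the OPENAI_API_KEY env var and raises at startup when
# neither is available.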
# ---- Edge TTS voices ----
VOICE_OPTIONS = {
    "Jenny (US, Female)": "en-US-JennyNeural",
    "Aria (US, Female)": "en-US-AriaNeural",
    "Ryan (UK, Male)": "en-GB-RyanNeural",
    "Natasha (AU, Female)": "en-AU-NatashaNeural",
    "William (AU, Male)": "en-AU-WilliamNeural",
    "Libby (UK, Female)": "en-GB-LibbyNeural",
    "Leah (SA, Female)": "en-ZA-LeahNeural",
    "Luke (SA, Male)": "en-ZA-LukeNeural",
}
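# Display label -> edge-tts short voice name. The full catalog can be listed
# with the edge-tts CLI: `edge-tts --list-voices`.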
st.set_page_config(page_title="LOR Technologies AI Assistant", layout="wide")
if "user_id" not in st.session_state:
st.session_state["user_id"] = str(uuid.uuid4())
user_id = st.session_state["user_id"]
if "mute_voice" not in st.session_state:
st.session_state["mute_voice"] = False
if "last_tts_text" not in st.session_state:
st.session_state["last_tts_text"] = ""
if "last_audio_path" not in st.session_state:
st.session_state["last_audio_path"] = ""
if "selected_voice" not in st.session_state:
st.session_state["selected_voice"] = "Jenny (US, Female)"
# --- Branding & Styling ---
st.markdown("""
<style>
.block-container {padding-top: 1rem; padding-bottom: 0rem;}
header {visibility: hidden;}
.stChatMessage { max-width: 85%; border-radius: 12px; padding: 8px; margin-bottom: 10px; }
.stChatMessage[data-testid="stChatMessage-user"] { background: #f0f0f0; color: #000000; }
.stChatMessage[data-testid="stChatMessage-assistant"] { background: #e3f2fd; color: #000000; }
    .lor-logo { vertical-align: middle; }
.st-emotion-cache-1avcm0n { justify-content: flex-end !important; }
.stChatInputContainer { position: fixed !important; bottom: 0; width: 80vw; z-index: 100; left: 10vw; background: #11131a; }
</style>
""", unsafe_allow_html=True)
st.markdown("""
<div style='text-align: center; margin-top: 20px; margin-bottom: -10px;'>
<span style='display: inline-flex; align-items: center; gap: 8px;'>
<img src='https://lortechnologies.com/wp-content/uploads/2023/03/LOR-Online-Logo.svg' width='100' class='lor-logo'/>
<span style='font-size: 12px; color: gray;'>Powered by LOR Technologies</span>
</span>
</div>
""", unsafe_allow_html=True)
# --- Sidebar: All audio/controls here ---
with st.sidebar:
    st.markdown("### Voice Settings & Controls")
    selected_voice = st.selectbox(
        "Select assistant voice",
        list(VOICE_OPTIONS.keys()),
        index=list(VOICE_OPTIONS.keys()).index(st.session_state["selected_voice"]),
    )
    st.session_state["selected_voice"] = selected_voice
    # Audio player, always present if we have an mp3
    last_audio = st.session_state.get("last_audio_path")
    mute_voice = st.session_state.get("mute_voice", False)
    # Replay button and audio player
    if last_audio and os.path.exists(last_audio):
        # Autoplay if this was just generated, else manual play
        st.audio(last_audio, format="audio/mp3", autoplay=not mute_voice)
        if st.button("🔁 Replay Voice"):
            st.audio(last_audio, format="audio/mp3", autoplay=True)
    # Mute/Unmute
    if not mute_voice:
        if st.button("🔇 Mute Voice"):
            st.session_state["mute_voice"] = True
            st.rerun()
    else:
        if st.button("🔊 Unmute Voice"):
            st.session_state["mute_voice"] = False
            st.rerun()
# --- Firestore helpers ---
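# Firestore layout (one anonymous session per browser tab, keyed by the UUID above):
#   users/{user_id}                  -> {thread_id, created_at}
#   users/{user_id}/messages/{auto}  -> {role, content, timestamp}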
def get_or_create_thread_id():
    doc_ref = db.collection("users").document(user_id)
    doc = doc_ref.get()
    if doc.exists:
        return doc.to_dict()["thread_id"]
    else:
        thread = client.beta.threads.create()
        doc_ref.set({"thread_id": thread.id, "created_at": firestore.SERVER_TIMESTAMP})
        return thread.id
def save_message(role, content):
    db.collection("users").document(user_id).collection("messages").add({
        "role": role,
        "content": content,
        "timestamp": firestore.SERVER_TIMESTAMP,
    })
def display_chat_history():
    # Render newest messages first, since the chat input is pinned to the bottom
    messages = db.collection("users").document(user_id).collection("messages").order_by("timestamp").stream()
    assistant_icon_html = "<img src='https://raw.githubusercontent.com/AndrewLORTech/lortechwebsite/main/lorain.jpg' width='24' style='vertical-align:middle; border-radius:50%;'/>"
    for msg in list(messages)[::-1]:
        data = msg.to_dict()
        if data["role"] == "user":
            st.markdown(f"<div class='stChatMessage' data-testid='stChatMessage-user'>👀 <strong>You:</strong> {data['content']}</div>", unsafe_allow_html=True)
        else:
            st.markdown(f"<div class='stChatMessage' data-testid='stChatMessage-assistant'>{assistant_icon_html} <strong>LORAIN:</strong> {data['content']}</div>", unsafe_allow_html=True)
# --- Edge TTS synth ---
async def edge_tts_synthesize(text, voice, user_id):
    out_path = f"output_{user_id}.mp3"
    communicate = edge_tts.Communicate(text, voice)
    await communicate.save(out_path)
    return out_path
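# The per-user filename keeps concurrent sessions from overwriting each other's
# audio, but successive replies from the same user reuse (and overwrite) one mp3.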
def synthesize_voice(text, voice_key, user_id):
    voice = VOICE_OPTIONS[voice_key]
    out_path = f"output_{user_id}.mp3"
    # Only synthesize if text changed or file missing or voice changed
    if st.session_state["last_tts_text"] != text or not os.path.exists(out_path) or st.session_state.get("last_voice") != voice:
        with st.spinner(f"Generating voice ({voice_key})..."):
            asyncio.run(edge_tts_synthesize(text, voice, user_id))
        st.session_state["last_tts_text"] = text
        st.session_state["last_audio_path"] = out_path
        st.session_state["last_voice"] = voice
    return out_path
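# asyncio.run() works here because a Streamlit script run is synchronous; it
# would raise RuntimeError if another event loop were already running.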
# --- Main Chat UI (text only!) ---
thread_id = get_or_create_thread_id()
display_chat_history()
# --- Static Chat Input at Bottom ---
user_input = st.chat_input("Type your message here...")
if user_input:
    # --- OpenAI Assistant Response ---
    client.beta.threads.messages.create(thread_id=thread_id, role="user", content=user_input)
    save_message("user", user_input)
    with st.spinner("Thinking and typing... 💭"):
        run = client.beta.threads.runs.create(thread_id=thread_id, assistant_id=assistant_id)
        while True:
            run_status = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run.id)
            if run_status.status == "completed":
                break
            # Bail out on terminal failure states instead of polling forever
            if run_status.status in ("failed", "cancelled", "expired"):
                st.error(f"Assistant run ended with status: {run_status.status}")
                st.stop()
            time.sleep(1)
    messages_response = client.beta.threads.messages.list(thread_id=thread_id)
    latest_response = sorted(messages_response.data, key=lambda x: x.created_at)[-1]
    assistant_message = latest_response.content[0].text.value
    save_message("assistant", assistant_message)
    # --- TTS: Speak unless muted ---
    mute_voice = st.session_state.get("mute_voice", False)
    audio_path = None
    if not mute_voice and assistant_message.strip():
        audio_path = synthesize_voice(assistant_message, st.session_state["selected_voice"], user_id)
        st.session_state["last_audio_path"] = audio_path
    # No audio in main area! The sidebar player autoplays it via the code above.
    time.sleep(0.2)
    st.rerun()