# accent-detection/streamlit_app.py
import streamlit as st
from src.tools.accent_tool import AccentAnalyzerTool
from src.app.main_agent import create_agent
from langchain_core.messages import HumanMessage, AIMessage
import re
import os
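# Streamlit chat front end for the accent-analysis agent: the first message is
# expected to contain a video URL, which is routed to the analysis agent;
# follow-up questions are answered by the follow-up agent using the transcript
# cached on the tool (last_transcript).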
st.set_page_config(page_title="Accent Analyzer Agent", page_icon="💬", layout="centered")
st.warning("⚠️ High latency (~11 min for a 0:59 video) due to CPU usage. Once migrated to GPU, response time will improve significantly.")
st.title("English Accent Analyzer (Conversational)")
st.subheader("Ask me to analyze a video URL, e.g.: \n\n> *Analyze this video: https://github.com/ash-171/Data-mp4/raw/refs/heads/main/NWRNVTFlRGlnV0FfNDgwcA_out.mp4*")
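# st.cache_resource keeps the tool and both agents alive across Streamlit
# reruns, so they are constructed only once per process rather than on every
# interaction.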
@st.cache_resource
def load_tool_and_agent():
    tool = AccentAnalyzerTool()
    analysis_agent, follow_up_agent = create_agent(tool)
    return tool, analysis_agent, follow_up_agent
accent_tool_obj, analysis_agent, follow_up_agent = load_tool_and_agent()
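# Chat history lives in session_state so it survives reruns. The chat-input
# placeholder (and widget key) switches once a transcript from a previous
# analysis is available on the tool.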
if "chat_history" not in st.session_state:
st.session_state.chat_history = []
if hasattr(accent_tool_obj, "last_transcript") and accent_tool_obj.last_transcript:
prompt_label = "Ask more about the video..."
input_key = "followup"
else:
prompt_label = "Paste your prompt here..."
input_key = "initial"
user_input = st.chat_input(prompt_label, key=input_key)
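# Deferred processing: store the new user message and a flag, then rerun so the
# chat (including that message) is rendered before the slow agent call starts.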
if user_input and not st.session_state.get("processing_started", False):
    st.session_state.chat_history.append(HumanMessage(content=user_input))
    if re.search(r'https?://\S+', user_input):
        accent_tool_obj.last_transcript = ""
        st.session_state.deferred_spinner_msg = "Analyzing new video..."
        st.session_state.run_agent_type = "analysis"
    else:
        st.session_state.deferred_spinner_msg = "Responding based on transcript..."
        st.session_state.run_agent_type = "followup"
    st.session_state.processing_started = True
    st.rerun()
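# Display the full chat history before any pending agent call is run.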
for msg in st.session_state.chat_history:
    with st.chat_message("user" if isinstance(msg, HumanMessage) else "assistant"):
        st.markdown(msg.content)
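# If a message is pending, invoke the appropriate agent under a spinner, append
# its reply to the history, clear the flag, and rerun to render the reply.
# Errors are surfaced in the chat rather than crashing the app.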
if st.session_state.get("processing_started", False):
    with st.spinner(st.session_state.deferred_spinner_msg):
        try:
            if st.session_state.run_agent_type == "analysis":
                result = analysis_agent.invoke(st.session_state.chat_history)[-1].content
            else:
                result = follow_up_agent.invoke(st.session_state.chat_history).content
        except Exception as e:
            result = f"Error: {str(e)}"
    st.session_state.chat_history.append(AIMessage(content=result))
    st.session_state.processing_started = False
    st.rerun()