import streamlit as st
from src.tools.accent_tool import AccentAnalyzerTool
from src.app.main_agent import create_agent
from langchain_core.messages import HumanMessage, AIMessage
import os
import re
st.set_page_config(page_title="Accent Analyzer Agent", page_icon="💬", layout="centered")
st.warning("⚠️ High latency due to CPU usage. Once migrated to GPU, response time will improve significantly.")
st.title("English Accent Analyzer (Conversational)")
st.subheader("Ask me to analyze a video URL, e.g.: \n\n `Analyze this video: https://github.com/ash-171/Data-mp4/raw/refs/heads/main/NWRNVTFlRGlnV0FfNDgwcA_out.mp4`")
@st.cache_resource
def load_tool_and_agent():
with st.spinner("Loading AI models and tools... This might take a moment."):
if not os.environ.get("HF_TOKEN"):
st.warning("HF_TOKEN environment variable not found. Some Hugging Face models might require authentication and may fail to load.")
tool = AccentAnalyzerTool()
analysis_agent, follow_up_agent = create_agent(tool)
return tool, analysis_agent, follow_up_agent
accent_tool_obj, analysis_agent, follow_up_agent = load_tool_and_agent()
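# Keep the conversation (HumanMessage/AIMessage objects) in session state across reruns.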
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []
if hasattr(accent_tool_obj, "last_transcript") and accent_tool_obj.last_transcript:
prompt_label = "Ask more about the video..."
input_key = "followup"
else:
prompt_label = "Paste your prompt here..."
input_key = "initial"
user_input = st.chat_input(prompt_label, key=input_key)
# Defer the assistant response so the existing chat history is rendered before the spinner runs
deferred_spinner_msg = ""
if user_input:
    st.session_state.chat_history.append(HumanMessage(content=user_input))
    if re.search(r'https?://\S+', user_input):
        accent_tool_obj.last_transcript = ""
        deferred_spinner_msg = "Analyzing new video..."
        def run_agent():
            return analysis_agent.invoke(st.session_state.chat_history)[-1].content
    else:
        deferred_spinner_msg = "Responding based on transcript..."
        def run_agent():
            return follow_up_agent.invoke(st.session_state.chat_history).content
# Run response generation inside spinner after chat is rendered
def process_response():
    with st.spinner(deferred_spinner_msg):
        try:
            result = run_agent()
        except Exception as e:
            result = f"Error: {str(e)}"
    st.session_state.chat_history.append(AIMessage(content=result))
    st.rerun()
# Display full chat history (before running spinner)
for msg in st.session_state.chat_history:
    with st.chat_message("user" if isinstance(msg, HumanMessage) else "assistant"):
        st.markdown(msg.content)
# Only process response at the bottom, after chat is shown
if user_input:
    process_response()