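# ---------------------------------------------------------------------------
# Earlier version of this app, kept commented out for reference. The active
# implementation follows below.
# ---------------------------------------------------------------------------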
# import streamlit as st
# from src.tools.accent_tool import AccentAnalyzerTool
# from src.app.main_agent import create_agent
# from langchain_core.messages import HumanMessage, AIMessage
# import re

# st.set_page_config(page_title="Accent Analyzer Agent", page_icon="💬", layout="centered")
# st.warning("⚠️ High latency due to CPU usage. Once migrated to GPU, response time will improve significantly.")
# st.title("English Accent Analyzer (Conversational)")
# st.subheader("Ask me to analyze a video URL, e.g.: \n\n `Analyze this video: https://github.com/ash-171/Data-mp4/raw/refs/heads/main/NWRNVTFlRGlnV0FfNDgwcA_out.mp4`")

# @st.cache_resource
# def load_tool_and_agent():
#     tool = AccentAnalyzerTool()
#     analysis_agent, follow_up_agent = create_agent(tool)
#     return tool, analysis_agent, follow_up_agent

# accent_tool_obj, analysis_agent, follow_up_agent = load_tool_and_agent()

# if "chat_history" not in st.session_state:
#     st.session_state.chat_history = []

# if hasattr(accent_tool_obj, "last_transcript") and accent_tool_obj.last_transcript:
#     prompt_label = "Ask more about the video..."
#     input_key = "followup"
# else:
#     prompt_label = "Paste your prompt here..."
#     input_key = "initial"

# user_input = st.chat_input(prompt_label, key=input_key)

# # Variable to defer assistant response
# deferred_response = None
# deferred_spinner_msg = ""

# if user_input:
#     st.session_state.chat_history.append(HumanMessage(content=user_input))

#     if re.search(r'https?://\S+', user_input):
#         accent_tool_obj.last_transcript = ""
#         deferred_spinner_msg = "Analyzing new video..."

#         def run_agent():
#             return analysis_agent.invoke(st.session_state.chat_history)[-1].content
#     else:
#         deferred_spinner_msg = "Responding based on transcript..."

#         def run_agent():
#             return follow_up_agent.invoke(st.session_state.chat_history).content

#     # Run response generation inside spinner after chat is rendered
#     def process_response():
#         with st.spinner(deferred_spinner_msg):
#             try:
#                 result = run_agent()
#             except Exception as e:
#                 result = f"Error: {str(e)}"
#             st.session_state.chat_history.append(AIMessage(content=result))
#         st.rerun()

# # Display full chat history (before running spinner)
# for msg in st.session_state.chat_history:
#     with st.chat_message("user" if isinstance(msg, HumanMessage) else "assistant"):
#         st.markdown(msg.content)

# # Only process response at the bottom, after chat is shown
# if user_input:
#     process_response()
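
# ---------------------------------------------------------------------------
# Active implementation.
# ---------------------------------------------------------------------------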
import streamlit as st
from src.tools.accent_tool import AccentAnalyzerTool
from src.app.main_agent import create_agent
from langchain_core.messages import HumanMessage, AIMessage
import re
import os
st.set_page_config(page_title="Accent Analyzer Agent", page_icon="💬", layout="centered")
st.warning("⚠️ High latency due to CPU usage. Once migrated to GPU, response time will improve significantly.")
st.title("English Accent Analyzer (Conversational)")
st.subheader("Ask me to analyze a video URL, e.g.: \n\n `Analyze this video: https://github.com/ash-171/Data-mp4/raw/refs/heads/main/NWRNVTFlRGlnV0FfNDgwcA_out.mp4`")
if "chat_history" not in st.session_state:
st.session_state.chat_history = []
if "last_transcript_available" not in st.session_state:
st.session_state.last_transcript_available = False
if "processing_input" not in st.session_state:
st.session_state.processing_input = False
@st.cache_resource
def load_tool_and_agent_cached():
with st.spinner("Loading AI models and tools... This might take a moment."):
if not os.environ.get("HF_TOKEN"):
st.warning("HF_TOKEN environment variable not found. Some Hugging Face models might require authentication and may fail to load.")
tool = AccentAnalyzerTool()
analysis_agent, follow_up_agent = create_agent(tool)
return tool, analysis_agent, follow_up_agent
accent_tool_obj, analysis_agent, follow_up_agent = load_tool_and_agent_cached()
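
# The UI below relies on the behaviour already visible in this file: the tool
# exposes a `last_transcript` string that is set once a video has been
# analyzed, `analysis_agent.invoke(messages)` returns a sequence of messages
# whose last element holds the reply, and `follow_up_agent.invoke(messages)`
# returns a single message object.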
# Reflect whether a transcript from a previous analysis is already available.
if hasattr(accent_tool_obj, "last_transcript") and accent_tool_obj.last_transcript:
    st.session_state.last_transcript_available = True
else:
    st.session_state.last_transcript_available = False

prompt_label = "Ask more about the video..." if st.session_state.last_transcript_available else "Paste your prompt here..."
user_input = st.chat_input(prompt_label, disabled=st.session_state.processing_input)
if user_input and not st.session_state.processing_input:
    st.session_state.processing_input = True
    st.session_state.chat_history.append(HumanMessage(content=user_input))

    # A URL in the message triggers a fresh analysis; anything else is treated
    # as a follow-up question about the last transcript.
    if re.search(r'https?://\S+', user_input):
        accent_tool_obj.last_transcript = ""
        st.session_state.last_transcript_available = False
        agent_to_run = analysis_agent
        spinner_msg = "Analyzing new video and transcribing audio..."
    else:
        agent_to_run = follow_up_agent
        spinner_msg = "Generating response based on transcript..."

    with st.spinner(spinner_msg):
        try:
            if agent_to_run is analysis_agent:
                response_content = agent_to_run.invoke(st.session_state.chat_history)[-1].content
            else:
                response_content = follow_up_agent.invoke(st.session_state.chat_history).content
            # A successful analysis leaves a transcript behind for follow-ups.
            if agent_to_run is analysis_agent and hasattr(accent_tool_obj, "last_transcript") and accent_tool_obj.last_transcript:
                st.session_state.last_transcript_available = True
        except Exception as e:
            response_content = f"Error: {str(e)}"
            st.error(f"An error occurred during processing: {e}")

    st.session_state.chat_history.append(AIMessage(content=response_content))
    st.session_state.processing_input = False
    st.rerun()

# Render the conversation so far on every rerun.
for msg in st.session_state.chat_history:
    with st.chat_message("user" if isinstance(msg, HumanMessage) else "assistant"):
        st.markdown(msg.content)
if not st.session_state.chat_history:
    st.info("Start by pasting a public MP4 video URL above!")