# NOTE: The original upload carried scraped Hugging Face Spaces page metadata
# here (space status, file size, git blame hashes, and a line-number gutter).
# It was not valid Python; it is preserved only as this comment so the file parses.
# import streamlit as st
# from src.tools.accent_tool import AccentAnalyzerTool
# from src.app.main_agent import create_agent
# from langchain_core.messages import HumanMessage, AIMessage
# import re
# st.set_page_config(page_title="Accent Analyzer Agent", page_icon="💬", layout="centered")
# st.warning("⚠️ High latency due to CPU usage. Once migrated to GPU, response time will improve significantly.")
# st.title("English Accent Analyzer (Conversational)")
# st.subheader("Ask me to analyze a video URL, e.g.: \n\n `Analyze this video: https://github.com/ash-171/Data-mp4/raw/refs/heads/main/NWRNVTFlRGlnV0FfNDgwcA_out.mp4`")
# @st.cache_resource
# def load_tool_and_agent():
# with st.spinner("Loading AI models and tools... This might take a moment."):
# if not os.environ.get("HF_TOKEN"):
# st.warning("HF_TOKEN environment variable not found. Some Hugging Face models might require authentication and may fail to load.")
# tool = AccentAnalyzerTool()
# analysis_agent, follow_up_agent = create_agent(tool)
# return tool, analysis_agent, follow_up_agent
# accent_tool_obj, analysis_agent, follow_up_agent = load_tool_and_agent()
# if "chat_history" not in st.session_state:
# st.session_state.chat_history = []
# if hasattr(accent_tool_obj, "last_transcript") and accent_tool_obj.last_transcript:
# prompt_label = "Ask more about the video..."
# input_key = "followup"
# else:
# prompt_label = "Paste your prompt here..."
# input_key = "initial"
# user_input = st.chat_input(prompt_label, key=input_key)
# # Variable to defer assistant response
# deferred_response = None
# deferred_spinner_msg = ""
# if user_input:
# st.session_state.chat_history.append(HumanMessage(content=user_input))
# if re.search(r'https?://\S+', user_input):
# accent_tool_obj.last_transcript = ""
# deferred_spinner_msg = "Analyzing new video..."
# def run_agent():
# return analysis_agent.invoke(st.session_state.chat_history)[-1].content
# else:
# deferred_spinner_msg = "Responding based on transcript..."
# def run_agent():
# return follow_up_agent.invoke(st.session_state.chat_history).content
# # Run response generation inside spinner after chat is rendered
# def process_response():
# with st.spinner(deferred_spinner_msg):
# try:
# result = run_agent()
# except Exception as e:
# result = f"Error: {str(e)}"
# st.session_state.chat_history.append(AIMessage(content=result))
# st.rerun()
# # Display full chat history (before running spinner)
# for msg in st.session_state.chat_history:
# with st.chat_message("user" if isinstance(msg, HumanMessage) else "assistant"):
# st.markdown(msg.content)
# # Only process response at the bottom, after chat is shown
# if user_input:
# process_response()
import streamlit as st
import os
import re
from src.tools.accent_tool import AccentAnalyzerTool
from src.app.main_agent import create_agent
from langchain_core.messages import HumanMessage, AIMessage
# ---- Page configuration and static header ----
# set_page_config must be the first Streamlit call in the script.
st.set_page_config(page_title="Accent Analyzer Agent", page_icon="💬", layout="centered")

st.warning(
    "⚠️ High latency due to CPU usage. Once migrated to GPU, response time will improve significantly."
)
st.title("English Accent Analyzer (Conversational)")
st.subheader(
    "Ask me to analyze a video URL, e.g.: \n\n `Analyze this video: https://github.com/ash-171/Data-mp4/raw/refs/heads/main/NWRNVTFlRGlnV0FfNDgwcA_out.mp4`"
)
# Build the accent tool and both agents exactly once per server process;
# @st.cache_resource memoizes the returned objects across reruns.
@st.cache_resource
def load_tool_and_agent():
    """Instantiate the accent-analysis tool and its two agents.

    Returns:
        tuple: (AccentAnalyzerTool instance, analysis agent, follow-up agent)
    """
    with st.spinner("Loading AI models and tools... This might take a moment."):
        # A missing token is not fatal, but gated models may fail to download.
        if not os.environ.get("HF_TOKEN"):
            st.warning("HF_TOKEN environment variable not found. Some Hugging Face models might require authentication and may fail to load.")
        accent_tool = AccentAnalyzerTool()
        primary_agent, followup_agent = create_agent(accent_tool)
        return accent_tool, primary_agent, followup_agent
accent_tool_obj, analysis_agent, follow_up_agent = load_tool_and_agent()

# Seed every session-state slot this app reads, so later code can assume
# the keys exist.
for _key, _default in (
    ("chat_history", []),
    ("pending_input", None),
    ("pending_spinner_msg", ""),
):
    if _key not in st.session_state:
        st.session_state[_key] = _default

# Choose the prompt wording: follow-up phrasing once a transcript is loaded,
# initial phrasing otherwise. The key forces a fresh widget on switch.
if getattr(accent_tool_obj, "last_transcript", None):
    prompt_label, input_key = "Ask more about the video...", "followup"
else:
    prompt_label, input_key = "Paste your prompt here...", "initial"

# Chat input field
user_input = st.chat_input(prompt_label, key=input_key)
# A message containing an http(s) URL requests a fresh video analysis.
# Compiled once; previously the same pattern was re-derived in two places
# (queueing AND processing), which could silently drift apart.
_URL_RE = re.compile(r'https?://\S+')

# Handle new user input: only queue it here. Processing is deferred until
# after the chat history is rendered, so the spinner appears at the bottom.
if user_input:
    st.session_state.chat_history.append(HumanMessage(content=user_input))
    st.session_state.pending_input = user_input
    # Classify the request exactly once and persist the decision; the
    # processing step below reuses it instead of re-scanning the text.
    is_url_request = bool(_URL_RE.search(user_input))
    st.session_state.pending_is_url = is_url_request
    if is_url_request:
        # New video: drop the stale transcript so prompt selection on the
        # next rerun treats the conversation as a fresh analysis.
        accent_tool_obj.last_transcript = ""
        st.session_state.pending_spinner_msg = "Analyzing new video..."
    else:
        st.session_state.pending_spinner_msg = "Responding based on transcript..."
    st.rerun()

# Display the full chat history before doing any slow work.
for msg in st.session_state.chat_history:
    with st.chat_message("user" if isinstance(msg, HumanMessage) else "assistant"):
        st.markdown(msg.content)

# If input is queued, process it now that the history is on screen.
if st.session_state.pending_input:
    with st.spinner(st.session_state.pending_spinner_msg):
        try:
            # Reuse the classification stored at queue time.
            if st.session_state.get("pending_is_url", False):
                result = analysis_agent.invoke(st.session_state.chat_history)[-1].content
            else:
                result = follow_up_agent.invoke(st.session_state.chat_history).content
        except Exception as e:
            # Surface the failure as a chat message instead of crashing the app.
            result = f"Error: {str(e)}"
    st.session_state.chat_history.append(AIMessage(content=result))
    # Clear the queue flags, then rerun to render the assistant reply.
    st.session_state.pending_input = None
    st.session_state.pending_spinner_msg = ""
    st.session_state.pending_is_url = False
    st.rerun()
# (end of file — trailing scrape artifact removed)