# src/app/main_agent.py
from langchain_core.messages import BaseMessage, AIMessage
from langchain_core.runnables import RunnableLambda, Runnable
from langchain_community.llms import Ollama
from langchain.tools import Tool
from langgraph.graph import MessageGraph
import re

# Local model served through Ollama; "llama3.1" can be swapped in for a larger model.
llm = Ollama(model="gemma3:1b", temperature=0.0)

def create_agent(accent_tool_obj) -> tuple[Runnable, Runnable]:
    """Build and return (analysis_agent, follow_up_agent) around an accent-analysis tool."""
    accent_tool = Tool(
        name="AccentAnalyzer",
        func=accent_tool_obj.analyze,
        description="Analyze a public MP4 video URL, determine the speaker's English accent, and return a transcription.",
    )

    def analyze_node(messages: list[BaseMessage]) -> AIMessage:
        # Extract the first URL from the latest message and run the accent tool on it.
        last_input = messages[-1].content
        match = re.search(r'https?://\S+', last_input)
        if match:
            url = match.group()
            result = accent_tool.func(url)
        else:
            result = "No valid video URL found in your message."
        return AIMessage(content=result)

    # Single-node LangGraph: the analyzer is both the entry and the finish point.
    graph = MessageGraph()
    graph.add_node("analyze_accent", RunnableLambda(analyze_node))
    graph.set_entry_point("analyze_accent")
    graph.set_finish_point("analyze_accent")
    analysis_agent = graph.compile()

    # Follow-up agent: answers questions against the transcript saved by the last analysis.
    def follow_up_node(messages: list[BaseMessage]) -> AIMessage:
        user_question = messages[-1].content
        transcript = accent_tool_obj.last_transcript or ""
        prompt = f"""You are given this transcript of a video:
\"\"\"{transcript}\"\"\"
Now respond to the user's follow-up question: {user_question}
"""
        response = llm.invoke(prompt)
        return AIMessage(content=response)
    follow_up_agent = RunnableLambda(follow_up_node)
    return analysis_agent, follow_up_agent
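
# Minimal usage sketch: assumes an accent-tool object exposing .analyze(url) and
# .last_transcript, e.g. a hypothetical AccentAnalyzerTool defined elsewhere in
# src/app. Kept as comments because the import path below is an assumption, not
# part of this module.
#
#   from langchain_core.messages import HumanMessage
#   from src.app.accent_tool import AccentAnalyzerTool  # hypothetical module/class
#
#   tool = AccentAnalyzerTool()
#   analysis_agent, follow_up_agent = create_agent(tool)
#   messages = analysis_agent.invoke([HumanMessage(content="https://example.com/clip.mp4")])
#   print(messages[-1].content)  # accent verdict plus transcription
#   answer = follow_up_agent.invoke([HumanMessage(content="Summarize the video.")])
#   print(answer.content)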