# accent-detection / src/app/main_agent.py
# (Hugging Face Space file; commit a182f79 "Update src/app/main_agent.py", 4.02 kB)
# --- Legacy Ollama-based implementation, kept commented out for reference ---
# from langchain_core.messages import BaseMessage, AIMessage
# from langchain_core.runnables import RunnableLambda, Runnable
# from langchain_community.llms import Ollama
# from langchain.tools import Tool
# from langgraph.graph import MessageGraph
# import re
# llm = Ollama(model="gemma3", temperature=0.0) # llama3.1
# def create_agent(accent_tool_obj) -> tuple[Runnable, Runnable]:
# accent_tool = Tool(
# name="AccentAnalyzer",
# func=accent_tool_obj.analyze,
# description="Analyze a public MP4 video URL and determine the English accent with transcription."
# )
# def analyze_node(messages: list[BaseMessage]) -> AIMessage:
# last_input = messages[-1].content
# match = re.search(r'https?://\S+', last_input)
# if match:
# url = match.group()
# result = accent_tool.func(url)
# else:
# result = "No valid video URL found in your message."
# return AIMessage(content=result)
# graph = MessageGraph()
# graph.add_node("analyze_accent", RunnableLambda(analyze_node))
# graph.set_entry_point("analyze_accent")
# graph.set_finish_point("analyze_accent")
# analysis_agent = graph.compile()
# # Follow-up agent that uses transcript and responds to questions
# def follow_up_node(messages: list[BaseMessage]) -> AIMessage:
# user_question = messages[-1].content
# transcript = accent_tool_obj.last_transcript or ""
# prompt = f"""You are given this transcript of a video:
# \"\"\"{transcript}\"\"\"
# Now respond to the user's follow-up question: {user_question}
# """
# response = llm.invoke(prompt)
# return AIMessage(content=response)
# follow_up_agent = RunnableLambda(follow_up_node)
# return analysis_agent, follow_up_agent
from langchain_core.messages import BaseMessage, AIMessage
from langchain_core.runnables import RunnableLambda, Runnable
from langchain.tools import Tool
from langgraph.graph import MessageGraph
import re
import torch
from transformers import pipeline
import os
# Load the Gemma 3 instruct pipeline once at module import so every call to
# create_agent() shares one model instance.
# BUG FIX: device was hard-coded to "cuda", which raises on CPU-only hosts
# (common on free Spaces tiers) — fall back to CPU when no GPU is available.
_device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = pipeline(
    "text-generation",
    model="google/gemma-3-1b-it",
    device=_device,
    torch_dtype=torch.bfloat16,
)
def create_agent(accent_tool_obj) -> tuple[Runnable, Runnable]:
    """Build the two runnables used by the app.

    Parameters
    ----------
    accent_tool_obj:
        Object exposing ``analyze(url) -> str`` and a ``last_transcript``
        attribute populated by a previous analysis.

    Returns
    -------
    tuple[Runnable, Runnable]
        ``(analysis_agent, follow_up_agent)`` — the first analyzes a video
        URL for accent, the second answers questions about the transcript.
    """
    accent_tool = Tool(
        name="AccentAnalyzer",
        func=accent_tool_obj.analyze,
        description="Analyze a public MP4 video URL and determine the English accent with transcription."
    )

    def analyze_node(messages: list[BaseMessage]) -> AIMessage:
        """Extract the first http(s) URL from the latest message and analyze it."""
        last_input = messages[-1].content
        match = re.search(r'https?://\S+', last_input)
        if match:
            url = match.group()
            result = accent_tool.func(url)
        else:
            result = "No valid video URL found in your message."
        return AIMessage(content=result)

    # Single-node graph: every incoming message goes straight to analysis.
    graph = MessageGraph()
    graph.add_node("analyze_accent", RunnableLambda(analyze_node))
    graph.set_entry_point("analyze_accent")
    graph.set_finish_point("analyze_accent")
    analysis_agent = graph.compile()

    # Follow-up agent that uses the stored transcript to answer questions.
    def follow_up_node(messages: list[BaseMessage]) -> AIMessage:
        """Answer the user's follow-up question grounded in the last transcript.

        BUG FIX: the previous version referenced an undefined ``prompt``
        variable (NameError on every call), shadowed the ``messages``
        parameter with a chat list that never contained the transcript or
        the user's question, and read ``generated_text`` as a plain string
        even though chat-format pipeline output returns the message list.
        """
        user_question = messages[-1].content
        transcript = accent_tool_obj.last_transcript or ""
        chat = [
            {
                "role": "system",
                "content": [{"type": "text", "text": "You are a helpful assistant."}],
            },
            {
                "role": "user",
                "content": [{
                    "type": "text",
                    "text": (
                        f'You are given this transcript of a video:\n"""{transcript}"""\n'
                        f"Now respond to the user's follow-up question: {user_question}"
                    ),
                }],
            },
        ]
        outputs = pipe(chat, max_new_tokens=256, do_sample=False)
        # With chat-style input, `generated_text` is the full message list;
        # the assistant's reply is the content of the last entry.
        response_text = outputs[0]["generated_text"][-1]["content"]
        return AIMessage(content=response_text)

    follow_up_agent = RunnableLambda(follow_up_node)
    return analysis_agent, follow_up_agent