# --- Scraped page residue (Hugging Face Spaces header), kept as comments so the file parses ---
# Spaces: Sleeping / Sleeping
# File size: 4,022 Bytes
# 20cf0cb 5a8c370 20cf0cb bf21f24 5a8c370 20cf0cb a182f79 5a8c370 a182f79 20cf0cb 5a8c370 20cf0cb 5a8c370
# from langchain_core.messages import BaseMessage, AIMessage
# from langchain_core.runnables import RunnableLambda, Runnable
# from langchain_community.llms import Ollama
# from langchain.tools import Tool
# from langgraph.graph import MessageGraph
# import re
# llm = Ollama(model="gemma3", temperature=0.0) # llama3.1
# def create_agent(accent_tool_obj) -> tuple[Runnable, Runnable]:
# accent_tool = Tool(
# name="AccentAnalyzer",
# func=accent_tool_obj.analyze,
# description="Analyze a public MP4 video URL and determine the English accent with transcription."
# )
# def analyze_node(messages: list[BaseMessage]) -> AIMessage:
# last_input = messages[-1].content
# match = re.search(r'https?://\S+', last_input)
# if match:
# url = match.group()
# result = accent_tool.func(url)
# else:
# result = "No valid video URL found in your message."
# return AIMessage(content=result)
# graph = MessageGraph()
# graph.add_node("analyze_accent", RunnableLambda(analyze_node))
# graph.set_entry_point("analyze_accent")
# graph.set_finish_point("analyze_accent")
# analysis_agent = graph.compile()
# # Follow-up agent that uses transcript and responds to questions
# def follow_up_node(messages: list[BaseMessage]) -> AIMessage:
# user_question = messages[-1].content
# transcript = accent_tool_obj.last_transcript or ""
# prompt = f"""You are given this transcript of a video:
# \"\"\"{transcript}\"\"\"
# Now respond to the user's follow-up question: {user_question}
# """
# response = llm.invoke(prompt)
# return AIMessage(content=response)
# follow_up_agent = RunnableLambda(follow_up_node)
# return analysis_agent, follow_up_agent
from langchain_core.messages import BaseMessage, AIMessage
from langchain_core.runnables import RunnableLambda, Runnable
from langchain.tools import Tool
from langgraph.graph import MessageGraph
import re
import torch
from transformers import pipeline
import os
# Load the Gemma 3 model pipeline once at module import so every request reuses it.
# BUG FIX: device was hardcoded to "cuda", which raises on CPU-only hosts;
# fall back to CPU when no GPU is available. NOTE(review): bfloat16 on CPU is
# supported by recent torch but slow — confirm acceptable for this deployment.
pipe = pipeline(
    "text-generation",
    model="google/gemma-3-1b-it",
    device="cuda" if torch.cuda.is_available() else "cpu",
    torch_dtype=torch.bfloat16,
)
def create_agent(accent_tool_obj) -> tuple[Runnable, Runnable]:
    """Build the two runnables used by the app.

    Args:
        accent_tool_obj: Object exposing ``analyze(url)`` (accent analysis of a
            public MP4 URL) and a ``last_transcript`` attribute that is
            populated after an analysis run.

    Returns:
        A ``(analysis_agent, follow_up_agent)`` tuple: the first is a compiled
        single-node MessageGraph that extracts a URL from the latest message
        and runs accent analysis; the second answers follow-up questions about
        the stored transcript via the module-level ``pipe`` LLM.
    """
    accent_tool = Tool(
        name="AccentAnalyzer",
        func=accent_tool_obj.analyze,
        description="Analyze a public MP4 video URL and determine the English accent with transcription."
    )

    def analyze_node(messages: list[BaseMessage]) -> AIMessage:
        # Pull the first http(s) URL out of the most recent message, if any.
        last_input = messages[-1].content
        match = re.search(r'https?://\S+', last_input)
        if match:
            url = match.group()
            result = accent_tool.func(url)
        else:
            result = "No valid video URL found in your message."
        return AIMessage(content=result)

    graph = MessageGraph()
    graph.add_node("analyze_accent", RunnableLambda(analyze_node))
    graph.set_entry_point("analyze_accent")
    graph.set_finish_point("analyze_accent")
    analysis_agent = graph.compile()

    # Follow-up agent that uses the stored transcript to answer questions.
    def follow_up_node(messages: list[BaseMessage]) -> AIMessage:
        user_question = messages[-1].content
        transcript = accent_tool_obj.last_transcript or ""
        # BUG FIX: the previous version built an unused chat-message list that
        # shadowed the `messages` parameter, then referenced an undefined name
        # `prompt`, so every follow-up call raised NameError. Build the prompt
        # from the transcript and the user's question instead.
        prompt = (
            "You are given this transcript of a video:\n"
            f'"""{transcript}"""\n'
            f"Now respond to the user's follow-up question: {user_question}\n"
        )
        outputs = pipe(prompt, max_new_tokens=256, do_sample=False)
        # A text-generation pipeline returns a list of dicts keyed by
        # 'generated_text'.
        response_text = outputs[0]['generated_text']
        return AIMessage(content=response_text)

    follow_up_agent = RunnableLambda(follow_up_node)
    return analysis_agent, follow_up_agent