# NOTE: removed non-code residue ("Spaces: / Sleeping / Sleeping") that was
# accidentally pasted in from a Hugging Face Spaces status page.
# Legacy Ollama-based implementation (superseded by the transformers pipeline
# version below):
# from langchain_core.messages import BaseMessage, AIMessage
# from langchain_core.runnables import RunnableLambda, Runnable
# from langchain_community.llms import Ollama
# from langchain.tools import Tool
# from langgraph.graph import MessageGraph
# import re
# llm = Ollama(model="gemma3", temperature=0.0) # llama3.1
# def create_agent(accent_tool_obj) -> tuple[Runnable, Runnable]:
#     accent_tool = Tool(
#         name="AccentAnalyzer",
#         func=accent_tool_obj.analyze,
#         description="Analyze a public MP4 video URL and determine the English accent with transcription."
#     )
#     def analyze_node(messages: list[BaseMessage]) -> AIMessage:
#         last_input = messages[-1].content
#         match = re.search(r'https?://\S+', last_input)
#         if match:
#             url = match.group()
#             result = accent_tool.func(url)
#         else:
#             result = "No valid video URL found in your message."
#         return AIMessage(content=result)
#     graph = MessageGraph()
#     graph.add_node("analyze_accent", RunnableLambda(analyze_node))
#     graph.set_entry_point("analyze_accent")
#     graph.set_finish_point("analyze_accent")
#     analysis_agent = graph.compile()
#     # Follow-up agent that uses transcript and responds to questions
#     def follow_up_node(messages: list[BaseMessage]) -> AIMessage:
#         user_question = messages[-1].content
#         transcript = accent_tool_obj.last_transcript or ""
#         prompt = f"""You are given this transcript of a video:
#         \"\"\"{transcript}\"\"\"
#         Now respond to the user's follow-up question: {user_question}
#         """
#         response = llm.invoke(prompt)
#         return AIMessage(content=response)
#     follow_up_agent = RunnableLambda(follow_up_node)
#     return analysis_agent, follow_up_agent
import os
import re

import torch
from transformers import pipeline

from langchain.tools import Tool
from langchain_core.messages import BaseMessage, AIMessage
from langchain_core.runnables import RunnableLambda, Runnable
from langgraph.graph import MessageGraph
# Load the Gemma 3 text-generation pipeline once at import time so every
# follow-up request reuses the same in-memory model instance.
gemma_pipeline = pipeline(
    task="text-generation",
    model="google/gemma-3-4b-it",  # or your preferred Gemma 3 model
    device=0,  # set -1 for CPU, 0 or other index for GPU
    torch_dtype=torch.bfloat16,  # halves memory vs float32; fine for inference
    # `use_auth_token` is deprecated/removed in recent transformers releases;
    # `token` is the supported keyword for gated models such as Gemma.
    token=os.getenv("HF_TOKEN"),
)
def create_agent(accent_tool_obj) -> tuple[Runnable, Runnable]:
    """Build the two runnables used by the app.

    Args:
        accent_tool_obj: Object exposing ``analyze(url)`` (returns an accent
            analysis string for a public MP4 URL) and a ``last_transcript``
            attribute holding the most recent transcription (may be None).

    Returns:
        Tuple of (analysis_agent, follow_up_agent). The first extracts a URL
        from the latest message and runs the accent analysis; the second
        answers follow-up questions against the last transcript via Gemma.
    """
    accent_tool = Tool(
        name="AccentAnalyzer",
        func=accent_tool_obj.analyze,
        description="Analyze a public MP4 video URL and determine the English accent with transcription."
    )

    def analyze_node(messages: list[BaseMessage]) -> AIMessage:
        # Pull the first http(s) URL out of the newest message, if any.
        last_input = messages[-1].content
        match = re.search(r'https?://\S+', last_input)
        if match:
            url = match.group()
            result = accent_tool.func(url)
        else:
            result = "No valid video URL found in your message."
        return AIMessage(content=result)

    # Single-node graph: entry and finish point are the same analysis step.
    graph = MessageGraph()
    graph.add_node("analyze_accent", RunnableLambda(analyze_node))
    graph.set_entry_point("analyze_accent")
    graph.set_finish_point("analyze_accent")
    analysis_agent = graph.compile()

    # Follow-up agent that uses transcript and responds to questions
    def follow_up_node(messages: list[BaseMessage]) -> AIMessage:
        user_question = messages[-1].content
        transcript = accent_tool_obj.last_transcript or ""
        prompt = f"""You are given this transcript of a video:
\"\"\"{transcript}\"\"\"
Now respond to the user's follow-up question: {user_question}
"""
        # return_full_text=False makes the pipeline return only the newly
        # generated completion. By default a text-generation pipeline's
        # 'generated_text' echoes the entire prompt back, so users would see
        # the transcript and their own question repeated in every answer.
        outputs = gemma_pipeline(
            prompt,
            max_new_tokens=256,
            do_sample=False,
            return_full_text=False,
        )
        return AIMessage(content=outputs[0]['generated_text'])

    follow_up_agent = RunnableLambda(follow_up_node)
    return analysis_agent, follow_up_agent