# agent.py
import os
import re
import uuid
from typing import List

import openpyxl
import torch
import yaml
from docx import Document as DocxDocument
from dotenv import load_dotenv
from langchain.tools.retriever import create_retriever_tool
from langchain_community.document_loaders import (
    ArxivLoader,
    PyMuPDFLoader,
    TextLoader,
    WikipediaLoader,
)
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.tools import tool
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_groq import ChatGroq
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
from langgraph.graph import START, MessagesState, StateGraph
from langgraph.prebuilt import ToolNode, tools_condition
from transformers import BertModel, BertTokenizer
from youtube_transcript_api import YouTubeTranscriptApi
from youtube_transcript_api._errors import TranscriptsDisabled, VideoUnavailable

load_dotenv()

@tool
def multiply(a: int, b: int) -> int:
    """Multiply two numbers.

    Args:
        a: first int
        b: second int
    """
    return a * b


@tool
def add(a: int, b: int) -> int:
    """Add two numbers.

    Args:
        a: first int
        b: second int
    """
    return a + b


@tool
def subtract(a: int, b: int) -> int:
    """Subtract two numbers.

    Args:
        a: first int
        b: second int
    """
    return a - b


@tool
def divide(a: int, b: int) -> float:
    """Divide two numbers.

    Args:
        a: first int
        b: second int
    """
    if b == 0:
        raise ValueError("Cannot divide by zero.")
    return a / b


@tool
def modulus(a: int, b: int) -> int:
    """Get the modulus of two numbers.

    Args:
        a: first int
        b: second int
    """
    return a % b
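
# Quick sanity check (illustrative): LangChain structured tools take a dict of
# arguments when invoked directly, e.g.
#   multiply.invoke({"a": 6, "b": 7})   # -> 42
#   divide.invoke({"a": 1, "b": 0})     # raises ValueError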

@tool
def wiki_search(query: str) -> dict:
    """Search Wikipedia for a query and return a maximum of 2 results.

    Args:
        query: The search query.
    """
    search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
    formatted_search_docs = "\n\n---\n\n".join(
        f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n'
        f"{doc.page_content}\n</Document>"
        for doc in search_docs
    )
    return {"wiki_results": formatted_search_docs}

@tool
def web_search(query: str) -> dict:
    """Search Tavily for a query and return a maximum of 3 results.

    Args:
        query: The search query.
    """
    # TavilySearchResults returns a list of dicts with "url" and "content"
    # keys (not Document objects), and .invoke() takes the query positionally.
    search_results = TavilySearchResults(max_results=3).invoke(query)
    formatted_search_docs = "\n\n---\n\n".join(
        f'<Document source="{result["url"]}"/>\n{result["content"]}\n</Document>'
        for result in search_results
    )
    return {"web_results": formatted_search_docs}

@tool
def arvix_search(query: str) -> dict:
    """Search arXiv for a query and return a maximum of 3 results.

    Args:
        query: The search query.
    """
    # The tool keeps the original "arvix" spelling because config.yaml and
    # tool_map refer to it by that name.
    search_docs = ArxivLoader(query=query, load_max_docs=3).load()
    # ArxivLoader exposes "Title" and "Published" metadata rather than "source".
    formatted_search_docs = "\n\n---\n\n".join(
        f'<Document title="{doc.metadata.get("Title", "")}" published="{doc.metadata.get("Published", "")}"/>\n'
        f"{doc.page_content[:1000]}\n</Document>"
        for doc in search_docs
    )
    return {"arvix_results": formatted_search_docs}

@tool
def analyze_attachment(file_path: str) -> str:
    """
    Analyze attachments, including PDF, TXT, DOCX, and XLSX files, and return the text content.

    Args:
        file_path: Local path to the attachment.
    """
    if not os.path.exists(file_path):
        return f"File not found: {file_path}"

    if file_path.lower().endswith(".pdf"):
        loader = PyMuPDFLoader(file_path)
        documents = loader.load()
        content = "\n\n".join(doc.page_content for doc in documents)
    elif file_path.lower().endswith(".txt"):
        loader = TextLoader(file_path)
        documents = loader.load()
        content = "\n\n".join(doc.page_content for doc in documents)
    elif file_path.lower().endswith(".docx"):
        doc = DocxDocument(file_path)
        content = "\n".join(para.text for para in doc.paragraphs)
    elif file_path.lower().endswith(".xlsx"):
        wb = openpyxl.load_workbook(file_path, data_only=True)
        content = ""
        for sheet in wb:
            content += f"Sheet: {sheet.title}\n"
            for row in sheet.iter_rows(values_only=True):
                content += "\t".join(str(cell) if cell is not None else "" for cell in row) + "\n"
    else:
        return "Unsupported file format. Please use PDF, TXT, DOCX, or XLSX."

    return content[:3000]  # Limit size for readability
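
# Illustrative call (the file path here is hypothetical):
#   analyze_attachment.invoke({"file_path": "attachments/report.xlsx"})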

@tool
def get_youtube_transcript(url: str) -> str:
    """
    Fetch transcript text from a YouTube video.

    Args:
        url (str): Full YouTube video URL.

    Returns:
        str: Transcript text as a single string.

    Raises:
        ValueError: If no transcript is available or the URL is invalid.
    """
    try:
        # extract_video_id is itself a @tool, so it must be called via
        # .invoke() rather than as a plain function.
        video_id = extract_video_id.invoke({"url": url})
        transcript = YouTubeTranscriptApi.get_transcript(video_id)
        # Combine all transcript snippets into one string
        return " ".join(entry["text"] for entry in transcript)
    except (TranscriptsDisabled, VideoUnavailable) as e:
        raise ValueError(f"Transcript not available: {e}")
    except Exception as e:
        raise ValueError(f"Failed to fetch transcript: {e}")

@tool
def extract_video_id(url: str) -> str:
    """
    Extract the video ID from a YouTube URL.
    """
    match = re.search(r"(?:v=|youtu\.be/)([A-Za-z0-9_-]{11})", url)
    if not match:
        raise ValueError("Invalid YouTube URL")
    return match.group(1)
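
# Illustrative: both common URL forms yield the same 11-character ID, e.g.
#   extract_video_id.invoke({"url": "https://www.youtube.com/watch?v=L1vXCYZAYYM"})  # -> "L1vXCYZAYYM"
#   extract_video_id.invoke({"url": "https://youtu.be/L1vXCYZAYYM"})                 # -> "L1vXCYZAYYM"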
# -----------------------------
# Load configuration from YAML
# -----------------------------
with open("config.yaml", "r") as f:
config = yaml.safe_load(f)
provider = config["provider"]
model_config = config["models"][provider]
#prompt_path = config["system_prompt_path"]
enabled_tool_names = config["tools"]
# -----------------------------
# Load system prompt
# -----------------------------
# load the system prompt from the file
with open("system_prompt.txt", "r", encoding="utf-8") as f:
system_prompt = f.read()
# System message
sys_msg = SystemMessage(content=system_prompt)
# -----------------------------
# Map tool names to functions
# -----------------------------
tool_map = {
    "multiply": multiply,
    "add": add,
    "subtract": subtract,
    "divide": divide,
    "modulus": modulus,
    "wiki_search": wiki_search,
    "web_search": web_search,
    "arvix_search": arvix_search,
    "get_youtube_transcript": get_youtube_transcript,
    "extract_video_id": extract_video_id,
    "analyze_attachment": analyze_attachment,
}
tools = [tool_map[name] for name in enabled_tool_names]
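
# For reference, the shape of config.yaml this code expects (the concrete
# values below are assumptions, not taken from the repo):
#
#   provider: groq
#   models:
#     groq:
#       model: llama-3.1-8b-instant
#       temperature: 0
#   tools:
#     - wiki_search
#     - web_search
#     - arvix_search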
# -------------------------------
# Step 2: Load the JSON file or tasks (replace this block if you load tasks dynamically)
# -------------------------------
# The tasks below stand in for data fetched from a URL or file; swap in the
# real loading logic when available.
tasks = [
    {
        "task_id": "8e867cd7-cff9-4e6c-867a-ff5ddc2550be",
        "question": "How many studio albums were published by Mercedes Sosa between 2000 and 2009 (included)? You can use the latest 2022 version of English Wikipedia.",
        "Level": "1",
        "file_name": "",
    },
    {
        "task_id": "a1e91b78-d3d8-4675-bb8d-62741b4b68a6",
        "question": "In the video https://www.youtube.com/watch?v=L1vXCYZAYYM, what is the highest number of bird species to be on camera simultaneously?",
        "Level": "1",
        "file_name": "",
    },
]
# -------------------------------
# Step 3: Create Documents from Each JSON Object
# -------------------------------
docs = []
for task in tasks:
    # Debugging: print the keys of each task to ensure 'question' exists
    print(f"Keys in task: {task.keys()}")

    # Ensure the required field 'question' exists
    if "question" not in task:
        print(f"Skipping task with missing 'question' field: {task}")
        continue

    content = task.get("question", "").strip()
    if not content:
        print(f"Skipping task with empty 'question': {task}")
        continue

    # Add a unique ID to each document
    task["id"] = str(uuid.uuid4())

    # Create a document from the task data
    docs.append(Document(page_content=content, metadata=task))
# -------------------------------
# Step 4: Set up HuggingFace Embeddings and FAISS VectorStore
# -------------------------------
# Initialize HuggingFace Embedding model
#embedding_model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
#embedding_model = HuggingFaceEmbeddings(model_name="BAAI/bge-base-en-v1.5")
class BERTEmbeddings(Embeddings):
    def __init__(self, model_name="bert-base-uncased"):
        self.tokenizer = BertTokenizer.from_pretrained(model_name)
        self.model = BertModel.from_pretrained(model_name)
        self.model.eval()  # Set to evaluation (inference) mode

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        return self._embed(texts)

    def embed_query(self, text: str) -> List[float]:
        return self._embed([text])[0]

    def _embed(self, texts: List[str]) -> List[List[float]]:
        inputs = self.tokenizer(texts, return_tensors="pt", padding=True, truncation=True)
        with torch.no_grad():
            outputs = self.model(**inputs)
        embeddings = outputs.last_hidden_state.mean(dim=1)  # Mean pooling over tokens
        return embeddings.cpu().numpy().tolist()
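
# Note: bert-base-uncased has hidden size 768, so each embedding is a
# 768-dimensional list, e.g.
#   len(BERTEmbeddings().embed_query("Argentine folk singer"))  # -> 768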

# Example usage of BERTEmbeddings with LangChain
embedding_model = BERTEmbeddings(model_name="bert-base-uncased")

# Sample reference passages (replace with your own text); appended so the
# task documents built in Step 3 are not discarded
docs += [
    Document(page_content="Mercedes Sosa released many albums between 2000 and 2009."),
    Document(page_content="She was a prominent Argentine folk singer."),
    Document(page_content="Her album 'Al Despertar' was released in 1998."),
    Document(page_content="She continued releasing music well into the 2000s."),
]

# Build the FAISS index over the documents and persist it locally
vector_store = FAISS.from_documents(docs, embedding_model)
vector_store.save_local("faiss_index")
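
# The saved index can be reloaded later; recent langchain_community versions
# require an explicit opt-in because FAISS pickles its docstore:
#   vector_store = FAISS.load_local(
#       "faiss_index", embedding_model, allow_dangerous_deserialization=True
#   )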

# -----------------------------
# Step 5: Create Retriever Tool
# -----------------------------
retriever = vector_store.as_retriever()

question_retriever_tool = create_retriever_tool(
    retriever=retriever,
    name="Question_Search",
    description="A tool to retrieve documents related to a user's question.",
)


def retriever_node(state: MessagesState):
    """Retriever node that filters matches by similarity score.

    Named retriever_node so it does not shadow the retriever object above.
    """
    query = state["messages"][0].content
    results = vector_store.similarity_search_with_score(query, k=4)  # top 4 matches

    # Filter by score (FAISS returns L2 distance: lower is more similar;
    # adjust the threshold as needed)
    threshold = 0.8
    filtered = [doc for doc, score in results if score < threshold]

    if not filtered:
        example_msg = HumanMessage(content="No relevant documents found.")
    else:
        content = "\n\n".join(doc.page_content for doc in filtered)
        example_msg = HumanMessage(
            content=f"Here are relevant reference documents:\n\n{content}"
        )

    return {"messages": [sys_msg] + state["messages"] + [example_msg]}

# The tool list bound to the LLM is the config-driven `tools` assembled above
# from enabled_tool_names; re-assigning a hardcoded list here would silently
# override the configuration.

def get_llm(provider: str, config: dict):
    """Instantiate the chat model for the configured provider."""
    if provider == "google":
        return ChatGoogleGenerativeAI(model=config["model"], temperature=config["temperature"])
    elif provider == "groq":
        return ChatGroq(model=config["model"], temperature=config["temperature"])
    elif provider == "huggingface":
        # HuggingFaceEndpoint expects endpoint_url, not url
        return ChatHuggingFace(
            llm=HuggingFaceEndpoint(endpoint_url=config["url"], temperature=config["temperature"])
        )
    else:
        raise ValueError(f"Invalid provider: {provider}")

# Build graph function
def build_graph():
    """Build the LangGraph agent graph for the configured provider."""
    llm = get_llm(provider, model_config)
    llm_with_tools = llm.bind_tools(tools)

    # Nodes
    def assistant(state: MessagesState):
        """Assistant node"""
        return {"messages": [llm_with_tools.invoke(state["messages"])]}

    def retriever(state: MessagesState):
        """Retriever node: fall back to wiki_search when FAISS finds nothing."""
        user_query = state["messages"][0].content
        similar_docs = vector_store.similarity_search(user_query)
        if not similar_docs:
            print("No similar docs found in FAISS. Using wiki_search.")
            wiki_result = wiki_search.invoke(user_query)
            return {
                "messages": [
                    sys_msg,
                    state["messages"][0],
                    HumanMessage(content=f"Using Wikipedia search:\n\n{wiki_result['wiki_results']}"),
                ]
            }
        else:
            return {
                "messages": [
                    sys_msg,
                    state["messages"][0],
                    HumanMessage(content=f"Reference question:\n\n{similar_docs[0].page_content}"),
                ]
            }

    builder = StateGraph(MessagesState)
    builder.add_node("retriever", retriever)
    builder.add_node("assistant", assistant)
    builder.add_node("tools", ToolNode(tools))
    builder.add_edge(START, "retriever")
    builder.add_edge("retriever", "assistant")
    builder.add_conditional_edges(
        "assistant",
        tools_condition,
    )
    builder.add_edge("tools", "assistant")

    # Compile graph
    return builder.compile()
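

if __name__ == "__main__":
    # Minimal local smoke test (an addition, not part of the original flow):
    # assumes config.yaml, system_prompt.txt, and the provider API keys in
    # .env are present.
    graph = build_graph()
    result = graph.invoke(
        {"messages": [HumanMessage(content="What is 6 multiplied by 7?")]}
    )
    # The last message in the state is the assistant's final reply
    print(result["messages"][-1].content)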