# agent.py
import os
from dotenv import load_dotenv
from langgraph.graph import START, StateGraph, MessagesState
from langgraph.prebuilt import tools_condition
from langgraph.prebuilt import ToolNode
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_groq import ChatGroq
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint, HuggingFaceEmbeddings
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_community.document_loaders import WikipediaLoader
from langchain_community.utilities import WikipediaAPIWrapper
from langchain_community.document_loaders import ArxivLoader
#from langchain_community.vectorstores import SupabaseVectorStore
from langchain_core.messages import SystemMessage, HumanMessage
from langchain_core.tools import tool
from langchain.tools.retriever import create_retriever_tool
from supabase.client import Client, create_client
from sentence_transformers import SentenceTransformer
from langchain.embeddings.base import Embeddings
from typing import List
import numpy as np
import yaml
import pandas as pd
import uuid
import requests
import json
from langchain_core.documents import Document
from langchain_community.vectorstores import FAISS
from youtube_transcript_api import YouTubeTranscriptApi
from youtube_transcript_api._errors import TranscriptsDisabled, VideoUnavailable
import re
from langchain_community.document_loaders import TextLoader, PyMuPDFLoader
from docx import Document as DocxDocument
import openpyxl
from io import StringIO
from transformers import BertTokenizer, BertModel
import torch
import torch.nn.functional as F
from langchain.tools import Tool
from langchain.agents import initialize_agent, AgentType
import time
load_dotenv()
@tool
def multiply(a: int, b: int) -> int:
"""Multiply two numbers.
Args:
a: first int
b: second int
"""
return a * b
@tool
def add(a: int, b: int) -> int:
"""Add two numbers.
Args:
a: first int
b: second int
"""
return a + b
@tool
def subtract(a: int, b: int) -> int:
"""Subtract two numbers.
Args:
a: first int
b: second int
"""
return a - b
@tool
def divide(a: int, b: int) -> float:
"""Divide two numbers.
Args:
a: first int
b: second int
"""
if b == 0:
raise ValueError("Cannot divide by zero.")
return a / b
@tool
def modulus(a: int, b: int) -> int:
"""Get the modulus of two numbers.
Args:
a: first int
b: second int
"""
return a % b
@tool
def calculator(a: int, b: int, operation: str) -> float:
"""
Perform a calculation between two numbers.
Args:
a: First number.
b: Second number.
operation: One of 'add', 'subtract', 'multiply', 'divide', 'modulus'.
"""
    operation = operation.lower()
    # Do the arithmetic inline: the @tool-decorated helpers above are meant
    # to be invoked by the agent, not called directly like plain functions.
    if operation == "add":
        return a + b
    elif operation == "subtract":
        return a - b
    elif operation == "multiply":
        return a * b
    elif operation == "divide":
        if b == 0:
            raise ValueError("Cannot divide by zero.")
        return a / b
    elif operation == "modulus":
        return a % b
    else:
        raise ValueError(f"Unsupported operation: {operation}")
@tool
def wiki_search(query: str) -> dict:
    """Search Wikipedia for a query and return a maximum of 2 results.
Args:
query: The search query."""
search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
formatted_search_docs = "\n\n---\n\n".join(
[
f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
for doc in search_docs
])
return {"wiki_results": formatted_search_docs}
@tool
def wikidata_query(query: str) -> str:
"""
Run a SPARQL query on Wikidata and return results.
"""
endpoint_url = "https://query.wikidata.org/sparql"
headers = {
"Accept": "application/sparql-results+json"
}
    response = requests.get(endpoint_url, headers=headers, params={"query": query})
    response.raise_for_status()
    data = response.json()
return json.dumps(data, indent=2)
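# Illustrative usage sketch (an assumption, not part of the original file):
# a typical SPARQL query this tool could run. P31 = "instance of" and
# Q482994 = "album" are standard Wikidata identifiers; the artist QID is a
# placeholder you would look up on Wikidata.
#
#   example_sparql = """
#   SELECT ?album ?albumLabel WHERE {
#     ?album wdt:P31 wd:Q482994 ;    # instance of: album
#            wdt:P175 wd:QXXXXXX .   # performer: <artist QID goes here>
#     SERVICE wikibase:label { bd:serviceParam wikibase:language "en". }
#   }
#   """
#   print(wikidata_query.invoke(example_sparql))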
@tool
def web_search(query: str) -> dict:
    """Search Tavily for a query and return a maximum of 3 results.
    Args:
        query: The search query."""
    # TavilySearchResults returns a list of dicts with "url" and "content"
    # keys, not Document objects, and invoke() takes the query positionally.
    search_docs = TavilySearchResults(max_results=3).invoke(query)
    formatted_search_docs = "\n\n---\n\n".join(
        [
            f'<Document source="{doc.get("url", "")}"/>\n{doc.get("content", "")}\n</Document>'
            for doc in search_docs
        ])
    return {"web_results": formatted_search_docs}
@tool
def arvix_search(query: str) -> dict:
    """Search Arxiv for a query and return a maximum of 3 results.
    Args:
        query: The search query."""
    search_docs = ArxivLoader(query=query, load_max_docs=3).load()
    # ArxivLoader documents carry "Title"/"Published" metadata rather than a
    # "source" key, so use .get() to avoid KeyErrors.
    formatted_search_docs = "\n\n---\n\n".join(
        [
            f'<Document source="{doc.metadata.get("Title", "")}" published="{doc.metadata.get("Published", "")}"/>\n{doc.page_content[:1000]}\n</Document>'
            for doc in search_docs
        ])
    return {"arvix_results": formatted_search_docs}
@tool
def analyze_attachment(file_path: str) -> str:
"""
Analyzes attachments including PY, PDF, TXT, DOCX, and XLSX files and returns text content.
Args:
file_path: Local path to the attachment.
"""
if not os.path.exists(file_path):
return f"File not found: {file_path}"
try:
        ext = file_path.lower()  # full lowercased path; the endswith() checks below still work
if ext.endswith(".pdf"):
loader = PyMuPDFLoader(file_path)
documents = loader.load()
content = "\n\n".join([doc.page_content for doc in documents])
elif ext.endswith(".txt") or ext.endswith(".py"):
# Both .txt and .py are plain text files
with open(file_path, "r", encoding="utf-8") as file:
content = file.read()
elif ext.endswith(".docx"):
doc = DocxDocument(file_path)
content = "\n".join([para.text for para in doc.paragraphs])
elif ext.endswith(".xlsx"):
wb = openpyxl.load_workbook(file_path, data_only=True)
content = ""
for sheet in wb:
content += f"Sheet: {sheet.title}\n"
for row in sheet.iter_rows(values_only=True):
content += "\t".join([str(cell) if cell is not None else "" for cell in row]) + "\n"
else:
return "Unsupported file format. Please use PY, PDF, TXT, DOCX, or XLSX."
return content[:3000] # Limit output size for readability
except Exception as e:
return f"An error occurred while processing the file: {str(e)}"
@tool
def get_youtube_transcript(url: str) -> str:
"""
Fetch transcript text from a YouTube video.
Args:
url (str): Full YouTube video URL.
Returns:
str: Transcript text as a single string.
Raises:
ValueError: If no transcript is available or URL is invalid.
"""
try:
        # Extract video ID via the plain helper; the @tool wrapper below is
        # for the agent and is not meant to be called directly.
        video_id = _extract_video_id(url)
        transcript = YouTubeTranscriptApi.get_transcript(video_id)
# Combine all transcript text
full_text = " ".join([entry['text'] for entry in transcript])
return full_text
except (TranscriptsDisabled, VideoUnavailable) as e:
raise ValueError(f"Transcript not available: {e}")
except Exception as e:
raise ValueError(f"Failed to fetch transcript: {e}")
def _extract_video_id(url: str) -> str:
    """Plain helper so other functions can reuse the ID extraction logic."""
    match = re.search(r"(?:v=|youtu\.be/)([A-Za-z0-9_-]{11})", url)
    if not match:
        raise ValueError("Invalid YouTube URL")
    return match.group(1)
@tool
def extract_video_id(url: str) -> str:
    """
    Extract the video ID from a YouTube URL.
    """
    return _extract_video_id(url)
# -----------------------------
# Load configuration from YAML
# -----------------------------
with open("config.yaml", "r") as f:
config = yaml.safe_load(f)
provider = config["provider"]
model_config = config["models"][provider]
#prompt_path = config["system_prompt_path"]
enabled_tool_names = config["tools"]
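# For reference, a config.yaml shape that satisfies the lookups above might
# look like this (an illustrative sketch of the keys this file reads --
# provider, models.<provider>, and tools -- not the actual config):
#
#   provider: groq
#   models:
#     google:
#       model: gemini-1.5-flash
#       temperature: 0
#     groq:
#       model: llama3-70b-8192
#       temperature: 0
#     huggingface:
#       url: https://<your-endpoint>.endpoints.huggingface.cloud
#       temperature: 0.1
#   tools:
#     - wiki_search
#     - web_search
#     - arvix_search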
# -----------------------------
# Load system prompt
# -----------------------------
# load the system prompt from the file
with open("system_prompt.txt", "r", encoding="utf-8") as f:
system_prompt = f.read()
# System message
sys_msg = SystemMessage(content=system_prompt)
# -----------------------------
# Map tool names to functions
# -----------------------------
tool_map = {
"multiply": multiply,
"add": add,
"subtract": subtract,
"divide": divide,
"modulus": modulus,
"wiki_search": wiki_search,
"web_search": web_search,
"arvix_search": arvix_search,
"get_youtube_transcript": get_youtube_transcript,
"extract_video_id": extract_video_id,
"analyze_attachment": analyze_attachment,
"wikidata_query": wikidata_query
}
tools = [tool_map[name] for name in enabled_tool_names]
# -------------------------------
# Step 2: Load the JSON file or tasks (Replace this part if you're loading tasks dynamically)
# -------------------------------
# Here we assume the tasks are already fetched from a URL or file.
# For now, using an example JSON array directly. Replace this with the actual loading logic.
tasks = [
{
"task_id": "8e867cd7-cff9-4e6c-867a-ff5ddc2550be",
"question": "How many studio albums were published by Mercedes Sosa between 2000 and 2009 (included)? You can use the latest 2022 version of English Wikipedia.",
"Level": "1",
"file_name": ""
},
{
"task_id": "a1e91b78-d3d8-4675-bb8d-62741b4b68a6",
"question": "In the video https://www.youtube.com/watch?v=L1vXCYZAYYM, what is the highest number of bird species to be on camera simultaneously?",
"Level": "1",
"file_name": ""
}
]
# -------------------------------
# Step 3: Create Documents from Each JSON Object
# -------------------------------
docs = []
for task in tasks:
# Debugging: Print the keys of each task to ensure 'question' exists
print(f"Keys in task: {task.keys()}")
# Ensure the required field 'question' exists
if 'question' not in task:
print(f"Skipping task with missing 'question' field: {task}")
continue
content = task.get('question', "").strip()
if not content:
print(f"Skipping task with empty 'question': {task}")
continue
# Add unique ID to each document
task['id'] = str(uuid.uuid4())
# Create a document from the task data
docs.append(Document(page_content=content, metadata=task))
# -------------------------------
# Step 4: Set up BERT Embeddings and FAISS VectorStore
# -------------------------------
# -----------------------------
# 1. Define Custom BERT Embedding Model
# -----------------------------
class BERTEmbeddings(Embeddings):
def __init__(self, model_name='bert-base-uncased'):
self.tokenizer = BertTokenizer.from_pretrained(model_name)
self.model = BertModel.from_pretrained(model_name)
self.model.eval() # Set model to eval mode
    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        inputs = self.tokenizer(texts, return_tensors='pt', padding=True, truncation=True)
        with torch.no_grad():
            outputs = self.model(**inputs)
        embeddings = outputs.last_hidden_state.mean(dim=1)  # mean pooling over tokens
        embeddings = F.normalize(embeddings, p=2, dim=1)  # Normalize for cosine similarity
        return embeddings.cpu().numpy().tolist()
    def embed_query(self, text: str) -> List[float]:
        return self.embed_documents([text])[0]
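# Quick sanity check (illustrative, safe to delete): queries should embed to
# unit-length vectors of BERT's hidden size (768 for bert-base-uncased).
#
#   _vec = BERTEmbeddings().embed_query("hello world")
#   assert len(_vec) == 768
#   assert abs(sum(v * v for v in _vec) - 1.0) < 1e-4  # L2-normalized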
# -----------------------------
# 2. Initialize Embedding Model
# -----------------------------
embedding_model = BERTEmbeddings()
# -----------------------------
# 3. Prepare Reference Documents
# -----------------------------
# Use a separate name so we don't clobber the task documents built above.
sample_docs = [
Document(page_content="Mercedes Sosa released many albums between 2000 and 2009.", metadata={"id": 1}),
Document(page_content="She was a prominent Argentine folk singer.", metadata={"id": 2}),
Document(page_content="Her album 'Al Despertar' was released in 1998.", metadata={"id": 3}),
Document(page_content="She continued releasing music well into the 2000s.", metadata={"id": 4}),
]
# -----------------------------
# 4. Create FAISS Vector Store
# -----------------------------
vector_store = FAISS.from_documents(docs + sample_docs, embedding_model)
vector_store.save_local("faiss_index")
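# The saved index can be reloaded later without re-embedding the documents.
# Recent langchain_community versions require opting in to pickle
# deserialization when loading a local index:
#
#   vector_store = FAISS.load_local(
#       "faiss_index", embedding_model, allow_dangerous_deserialization=True
#   )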
# -----------------------------
# 5. Query & Filter Results (optional preview)
# -----------------------------
query = "How many albums did Mercedes Sosa release between 2000 and 2009?"
results = vector_store.similarity_search_with_score(query, k=5)
# FAISS returns L2 distances, so lower scores mean closer matches.
threshold = 0.75
print("\n📊 Retrieved Documents with Similarity Scores:")
filtered = []
for doc, score in results:
print(f"🔢 Score: {score:.4f}")
print(f"📄 Content: {doc.page_content}")
if score < threshold:
filtered.append(doc)
print("✅ Accepted")
else:
print("❌ Rejected")
print("-" * 80)
# -----------------------------
# 6. Create LangChain Retriever Tool
# -----------------------------
retriever = vector_store.as_retriever()
# -------------------------------
# Step 5: Create LangChain Tools
# -------------------------------
# WikipediaAPIWrapper is a utility class, not a LangChain tool; reuse the
# @tool-decorated wiki_search defined above instead.
wiki_tool = wiki_search
calc_tool = calculator
file_tool = analyze_attachment
web_tool = web_search
arvix_tool = arvix_search
youtube_tool = get_youtube_transcript
video_tool = extract_video_id
wikiq_tool = wikidata_query
# -------------------------------
# Step 6: Create the Planner-Agent Logic
# -------------------------------
# Define the agent tool set (ChatOpenAI comes from the langchain_openai
# partner package; initialize_agent/AgentType are already imported above)
from langchain_openai import ChatOpenAI
# Define the tools; analyze_attachment is listed once to avoid a duplicate tool name
tools = [wiki_tool, calc_tool, file_tool, web_tool, arvix_tool, youtube_tool, video_tool, wikiq_tool]
# Define the LLM before using it
llm = ChatOpenAI(temperature=0, model="gpt-4")  # or "gpt-3.5-turbo"
# Create an agent using the planner, task classifier, and decision logic
agent = initialize_agent(
    tools=tools,
    llm=llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True
)
# -------------------------------
# Step 7: Use the Planner, Classifier, and Decision Logic
# -------------------------------
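# NOTE: planner, task_classifier, decide_task, and node_skipper are called
# below but were never defined in this file. The stubs here are minimal
# illustrative placeholders (an assumption about the intended behaviour),
# just enough for process_question() to run end to end.
def planner(question: str) -> list:
    """Sketch: a trivial fixed task sequence."""
    return ["classify", "execute", "answer"]
def task_classifier(question: str) -> str:
    """Sketch: crude keyword routing between math and Wikipedia lookup."""
    math_words = ("add", "subtract", "multiply", "divide", "modulus")
    if any(word in question.lower() for word in math_words):
        return "math"
    return "wiki_search"
def decide_task(state: dict) -> str:
    """Sketch: the next task is simply the classified task type."""
    return task_classifier(state["question"])
def node_skipper(state: dict) -> str:
    """Sketch: never skip; an empty string is falsy in the caller's check."""
    return ""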
def process_question(question):
# Step 1: Planner generates the task sequence
tasks = planner(question)
print(f"Tasks to perform: {tasks}")
# Step 2: Classify the task (based on question)
task_type = task_classifier(question)
print(f"Task type: {task_type}")
# Step 3: Use the classifier and planner to decide on the next task or node
state = {"question": question, "last_response": ""}
next_task = decide_task(state)
print(f"Next task: {next_task}")
# Step 4: Use node skipper logic (skip if needed)
skip = node_skipper(state)
if skip:
print(f"Skipping to {skip}")
return skip # Or move directly to generating answer
# Step 5: Execute task (with error handling)
try:
        if task_type == "wiki_search":
            response = wiki_search.invoke(question)
        elif task_type == "math":
            # Free-text math is not parsed into calculator arguments here;
            # fall back to a placeholder answer.
            response = "Math handling not implemented for free-text questions."
        else:
            response = "Default answer logic"
        # Step 6: Final response formatting
        final_response = generate_final_answer(state, {task_type: response})
return final_response
except Exception as e:
print(f"Error executing task: {e}")
return "Sorry, I encountered an error processing your request."
# Run the demo (guarded so importing this module doesn't trigger it)
if __name__ == "__main__":
    question = "How many albums did Mercedes Sosa release between 2000 and 2009?"
    response = process_question(question)
    print("Final Response:", response)
question_retriever_tool = create_retriever_tool(
retriever=retriever,
name="Question_Search",
description="A tool to retrieve documents related to a user's question."
)
def retriever_node(state: MessagesState):
    """Retriever node using similarity scores for filtering.
    Renamed from `retriever` so it does not shadow the vector-store retriever above."""
query = state["messages"][0].content
results = vector_store.similarity_search_with_score(query, k=4) # top 4 matches
    # Use a stricter distance threshold for "who" questions
    # (FAISS L2 distance: lower means a closer match)
    threshold = 0.75 if "who" in query else 0.8
filtered = [doc for doc, score in results if score < threshold]
# Provide a default message if no documents found
if not filtered:
example_msg = HumanMessage(content="No relevant documents found.")
else:
content = "\n\n".join(doc.page_content for doc in filtered)
example_msg = HumanMessage(
content=f"Here are relevant reference documents:\n\n{content}"
)
return {"messages": [sys_msg] + state["messages"] + [example_msg]}
# Tool list bound to the LangGraph assistant below (note: this replaces the
# earlier config-driven and planner-agent tool lists)
tools = [
multiply,
add,
subtract,
divide,
modulus,
wiki_search,
web_search,
arvix_search,
]
def get_llm(provider: str, config: dict):
if provider == "google":
return ChatGoogleGenerativeAI(model=config["model"], temperature=config["temperature"])
elif provider == "groq":
return ChatGroq(model=config["model"], temperature=config["temperature"])
elif provider == "huggingface":
        return ChatHuggingFace(
            llm=HuggingFaceEndpoint(endpoint_url=config["url"], temperature=config["temperature"])
        )
else:
raise ValueError(f"Invalid provider: {provider}")
def generate_final_answer(state, tools_results):
final_answer = ""
# Concatenate results from each tool (wiki_search, calculator, etc.)
for tool_name, result in tools_results.items():
final_answer += f"{tool_name} result: {result}\n"
return final_answer
# Build graph function
def build_graph():
"""Build the graph based on provider"""
llm = get_llm(provider, model_config)
llm_with_tools = llm.bind_tools(tools)
# Node
def assistant(state: MessagesState):
"""Assistant node"""
return {"messages": [llm_with_tools.invoke(state["messages"])]}
def retriever(state: MessagesState):
user_query = state["messages"][0].content
similar_docs = vector_store.similarity_search(user_query)
if not similar_docs:
print("No similar docs found in FAISS. Using wiki_search.")
wiki_result = wiki_search.invoke(user_query)
return {
"messages": [
sys_msg,
state["messages"][0],
HumanMessage(content=f"Using Wikipedia search:\n\n{wiki_result['wiki_results']}")
]
}
else:
return {
"messages": [
sys_msg,
state["messages"][0],
HumanMessage(content=f"Reference question:\n\n{similar_docs[0].page_content}")
]
}
builder = StateGraph(MessagesState)
builder.add_node("retriever", retriever)
builder.add_node("assistant", assistant)
builder.add_node("tools", ToolNode(tools))
builder.add_edge(START, "retriever")
builder.add_edge("retriever", "assistant")
builder.add_conditional_edges(
"assistant",
tools_condition,
)
builder.add_edge("tools", "assistant")
# Compile graph
return builder.compile()
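# Minimal usage sketch (an assumption about how a caller such as app.py
# drives this module; not part of the original file):
#
#   graph = build_graph()
#   result = graph.invoke(
#       {"messages": [HumanMessage(content="What is 6 times 7?")]}
#   )
#   print(result["messages"][-1].content)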