# agent.py
import os
from dotenv import load_dotenv
from langgraph.graph import START, END, StateGraph, MessagesState
from langgraph.prebuilt import tools_condition
from langgraph.prebuilt import ToolNode
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_groq import ChatGroq
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint, HuggingFaceEmbeddings
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_community.document_loaders import WikipediaLoader
from langchain_community.utilities import WikipediaAPIWrapper
from langchain_community.document_loaders import ArxivLoader
#from langchain_community.vectorstores import SupabaseVectorStore
from langchain_core.messages import SystemMessage, HumanMessage
from langchain_core.tools import tool
from langchain.tools.retriever import create_retriever_tool
from supabase.client import Client, create_client
from sentence_transformers import SentenceTransformer
from langchain.embeddings.base import Embeddings
from typing import List
import numpy as np
import yaml
import pandas as pd
import uuid
import requests
import json
from langchain_core.documents import Document
from langchain_community.vectorstores import FAISS
from youtube_transcript_api import YouTubeTranscriptApi
from youtube_transcript_api._errors import TranscriptsDisabled, VideoUnavailable
import re
from langchain_community.document_loaders import TextLoader, PyMuPDFLoader
from docx import Document as DocxDocument
import openpyxl
from io import StringIO
from transformers import BertTokenizer, BertModel
import torch
import torch.nn.functional as F
from langchain.agents import initialize_agent, AgentType
from langchain_community.chat_models import ChatOpenAI
from langchain_community.tools import Tool
import time
from huggingface_hub import InferenceClient
from langchain_community.llms import HuggingFaceHub
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
from langchain_huggingface import HuggingFacePipeline
from huggingface_hub import login
load_dotenv()
@tool
def multiply(a: int, b: int) -> int:
"""Multiply two numbers.
Args:
a: first int
b: second int
"""
return a * b
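# Example (illustrative): @tool-decorated functions become LangChain tools, so they are
# invoked with a dict of arguments rather than called like plain functions, e.g.:
#   multiply.invoke({"a": 6, "b": 7})  # -> 42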
@tool
def add(a: int, b: int) -> int:
"""Add two numbers.
Args:
a: first int
b: second int
"""
return a + b
@tool
def subtract(a: int, b: int) -> int:
"""Subtract two numbers.
Args:
a: first int
b: second int
"""
return a - b
@tool
def divide(a: int, b: int) -> float:
"""Divide two numbers.
Args:
a: first int
b: second int
"""
if b == 0:
raise ValueError("Cannot divide by zero.")
return a / b
@tool
def modulus(a: int, b: int) -> int:
"""Get the modulus of two numbers.
Args:
a: first int
b: second int
"""
return a % b
@tool
def calculator(inputs: dict):
"""Perform mathematical operations based on the operation provided."""
a = inputs.get("a")
b = inputs.get("b")
operation = inputs.get("operation")
if operation == "add":
return a + b
elif operation == "subtract":
return a - b
elif operation == "multiply":
return a * b
elif operation == "divide":
if b == 0:
return "Error: Division by zero"
return a / b
elif operation == "modulus":
return a % b
else:
return "Unknown operation"
@tool
def wiki_search(query: str) -> str:
"""Search Wikipedia for a query and return maximum 2 results.
Args:
query: The search query."""
search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
formatted_search_docs = "\n\n---\n\n".join(
[
f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
for doc in search_docs
])
return {"wiki_results": formatted_search_docs}
@tool
def wikidata_query(query: str) -> str:
"""
Run a SPARQL query on Wikidata and return results.
"""
endpoint_url = "https://query.wikidata.org/sparql"
headers = {
"Accept": "application/sparql-results+json"
}
response = requests.get(endpoint_url, headers=headers, params={"query": query})
data = response.json()
return json.dumps(data, indent=2)
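# Example (illustrative): a minimal SPARQL query this tool could run, looking up the item
# labelled "Mercedes Sosa" (the query string here is an assumption, not part of the task set):
#   wikidata_query.invoke({"query": 'SELECT ?item WHERE { ?item rdfs:label "Mercedes Sosa"@en } LIMIT 1'})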
@tool
def web_search(query: str) -> str:
    """Search Tavily for a query and return a maximum of 3 results.
    Args:
        query: The search query."""
    search_results = TavilySearchResults(max_results=3).invoke(query)
    formatted_search_docs = "\n\n---\n\n".join(
        [
            f'<Document source="{result.get("url", "")}"/>\n{result.get("content", "")}\n</Document>'
            for result in search_results
        ])
    return {"web_results": formatted_search_docs}
@tool
def arvix_search(query: str) -> str:
    """Search Arxiv for a query and return a maximum of 3 results.
    Args:
        query: The search query."""
    search_docs = ArxivLoader(query=query, load_max_docs=3).load()
    formatted_search_docs = "\n\n---\n\n".join(
        [
            f'<Document source="{doc.metadata.get("source", doc.metadata.get("Title", ""))}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content[:1000]}\n</Document>'
            for doc in search_docs
        ])
    return {"arvix_results": formatted_search_docs}
@tool
def analyze_attachment(file_path: str) -> str:
"""
Analyzes attachments including PY, PDF, TXT, DOCX, and XLSX files and returns text content.
Args:
file_path: Local path to the attachment.
"""
if not os.path.exists(file_path):
return f"File not found: {file_path}"
try:
ext = file_path.lower()
if ext.endswith(".pdf"):
loader = PyMuPDFLoader(file_path)
documents = loader.load()
content = "\n\n".join([doc.page_content for doc in documents])
elif ext.endswith(".txt") or ext.endswith(".py"):
# Both .txt and .py are plain text files
with open(file_path, "r", encoding="utf-8") as file:
content = file.read()
elif ext.endswith(".docx"):
doc = DocxDocument(file_path)
content = "\n".join([para.text for para in doc.paragraphs])
elif ext.endswith(".xlsx"):
wb = openpyxl.load_workbook(file_path, data_only=True)
content = ""
for sheet in wb:
content += f"Sheet: {sheet.title}\n"
for row in sheet.iter_rows(values_only=True):
content += "\t".join([str(cell) if cell is not None else "" for cell in row]) + "\n"
else:
return "Unsupported file format. Please use PY, PDF, TXT, DOCX, or XLSX."
return content[:3000] # Limit output size for readability
except Exception as e:
return f"An error occurred while processing the file: {str(e)}"
@tool
def get_youtube_transcript(url: str) -> str:
"""
Fetch transcript text from a YouTube video.
Args:
url (str): Full YouTube video URL.
Returns:
str: Transcript text as a single string.
Raises:
ValueError: If no transcript is available or URL is invalid.
"""
try:
        # Extract video ID (extract_video_id is itself a tool, so invoke it rather than calling it directly)
        video_id = extract_video_id.invoke(url)
        transcript = YouTubeTranscriptApi.get_transcript(video_id)
# Combine all transcript text
full_text = " ".join([entry['text'] for entry in transcript])
return full_text
except (TranscriptsDisabled, VideoUnavailable) as e:
raise ValueError(f"Transcript not available: {e}")
except Exception as e:
raise ValueError(f"Failed to fetch transcript: {e}")
@tool
def extract_video_id(url: str) -> str:
"""
Extract the video ID from a YouTube URL.
"""
match = re.search(r"(?:v=|youtu\.be/)([A-Za-z0-9_-]{11})", url)
if not match:
raise ValueError("Invalid YouTube URL")
return match.group(1)
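# Example (illustrative), using the YouTube URL from the sample tasks below:
#   extract_video_id.invoke({"url": "https://www.youtube.com/watch?v=L1vXCYZAYYM"})  # -> "L1vXCYZAYYM"
#   get_youtube_transcript.invoke({"url": "https://www.youtube.com/watch?v=L1vXCYZAYYM"})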
# -----------------------------
# Load configuration from YAML
# -----------------------------
with open("config.yaml", "r") as f:
config = yaml.safe_load(f)
provider = config["provider"]
model_config = config["models"][provider]
#prompt_path = config["system_prompt_path"]
enabled_tool_names = config["tools"]
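# For reference, a config.yaml consistent with the keys read above might look like this
# (a sketch with illustrative values, not the actual configuration file):
#
#   provider: groq
#   models:
#     groq:
#       model: llama3-70b-8192
#       temperature: 0
#   tools:
#     - wiki_search
#     - web_search
#     - arvix_search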
# -----------------------------
# Load system prompt
# -----------------------------
# load the system prompt from the file
with open("system_prompt.txt", "r", encoding="utf-8") as f:
system_prompt = f.read()
# System message
sys_msg = SystemMessage(content=system_prompt)
# -----------------------------
# Map tool names to functions
# -----------------------------
tool_map = {
"multiply": multiply,
"add": add,
"subtract": subtract,
"divide": divide,
"modulus": modulus,
"wiki_search": wiki_search,
"web_search": web_search,
"arvix_search": arvix_search,
"get_youtube_transcript": get_youtube_transcript,
"extract_video_id": extract_video_id,
"analyze_attachment": analyze_attachment,
"wikidata_query": wikidata_query
}
tools = [tool_map[name] for name in enabled_tool_names]
# -------------------------------
# Step 2: Load the JSON file or tasks (Replace this part if you're loading tasks dynamically)
# -------------------------------
# Here we assume the tasks are already fetched from a URL or file.
# For now, using an example JSON array directly. Replace this with the actual loading logic.
tasks = [
{
"task_id": "8e867cd7-cff9-4e6c-867a-ff5ddc2550be",
"question": "How many studio albums were published by Mercedes Sosa between 2000 and 2009 (included)? You can use the latest 2022 version of English Wikipedia.",
"Level": "1",
"file_name": ""
},
{
"task_id": "a1e91b78-d3d8-4675-bb8d-62741b4b68a6",
"question": "In the video https://www.youtube.com/watch?v=L1vXCYZAYYM, what is the highest number of bird species to be on camera simultaneously?",
"Level": "1",
"file_name": ""
}
]
# -------------------------------
# Step 3: Create Documents from Each JSON Object
# -------------------------------
docs = []
for task in tasks:
# Debugging: Print the keys of each task to ensure 'question' exists
print(f"Keys in task: {task.keys()}")
# Ensure the required field 'question' exists
if 'question' not in task:
print(f"Skipping task with missing 'question' field: {task}")
continue
content = task.get('question', "").strip()
if not content:
print(f"Skipping task with empty 'question': {task}")
continue
# Add unique ID to each document
task['id'] = str(uuid.uuid4())
# Create a document from the task data
docs.append(Document(page_content=content, metadata=task))
# -------------------------------
# Step 4: Set up BERT Embeddings and FAISS VectorStore
# -------------------------------
# -----------------------------
# 1. Define Custom BERT Embedding Model
# -----------------------------
class BERTEmbeddings(Embeddings):
def __init__(self, model_name='bert-base-uncased'):
self.tokenizer = BertTokenizer.from_pretrained(model_name)
self.model = BertModel.from_pretrained(model_name)
self.model.eval() # Set model to eval mode
def embed_documents(self, texts):
inputs = self.tokenizer(texts, return_tensors='pt', padding=True, truncation=True)
with torch.no_grad():
outputs = self.model(**inputs)
embeddings = outputs.last_hidden_state.mean(dim=1)
embeddings = F.normalize(embeddings, p=2, dim=1) # Normalize for cosine similarity
return embeddings.cpu().numpy()
def embed_query(self, text):
return self.embed_documents([text])[0]
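# Note: embed_query returns a single L2-normalised vector of size 768 (the hidden size of
# bert-base-uncased), produced by mean-pooling the token embeddings. Illustrative usage:
#   BERTEmbeddings().embed_query("Argentine folk singers")  # -> np.ndarray of shape (768,)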
# -----------------------------
# 2. Initialize Embedding Model
# -----------------------------
embedding_model = BERTEmbeddings()
# -----------------------------
# 3. Prepare Documents
# -----------------------------
# Extend (rather than overwrite) the task documents with a few reference snippets
docs += [
Document(page_content="Mercedes Sosa released many albums between 2000 and 2009.", metadata={"id": 1}),
Document(page_content="She was a prominent Argentine folk singer.", metadata={"id": 2}),
Document(page_content="Her album 'Al Despertar' was released in 1998.", metadata={"id": 3}),
Document(page_content="She continued releasing music well into the 2000s.", metadata={"id": 4}),
]
# -----------------------------
# 4. Create FAISS Vector Store
# -----------------------------
vector_store = FAISS.from_documents(docs, embedding_model)
vector_store.save_local("faiss_index")
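# The saved index can be reloaded later without re-embedding (illustrative; recent
# langchain-community versions require explicitly allowing pickle deserialisation):
#   vector_store = FAISS.load_local("faiss_index", embedding_model, allow_dangerous_deserialization=True)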
# -----------------------------
# 5. Create LangChain Retriever Tool
# -----------------------------
retriever = vector_store.as_retriever()
question_retriever_tool = create_retriever_tool(
retriever=retriever,
name="Question_Search",
description="A tool to retrieve documents related to a user's question."
)
# -------------------------------
# Step 6: Create LangChain Tools
# -------------------------------
calc_tool = calculator
file_tool = analyze_attachment
web_tool = web_search
wiki_tool = wiki_search
arvix_tool = arvix_search
youtube_tool = get_youtube_transcript
video_tool = extract_video_id
analyze_tool = analyze_attachment
wikiq_tool = wikidata_query
# -------------------------------
# Step 7: Create the Planner-Agent Logic
# -------------------------------
# Define the tools (as you've already done)
tools = [wiki_tool, calc_tool, file_tool, web_tool, arvix_tool, youtube_tool, video_tool, analyze_tool, wikiq_tool]
# Define the LLM before using it
#llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo") # or "gpt-3.5-turbo" "gpt-4"
#llm = ChatMistralAI(model="mistral-7b-instruct-v0.1")
# Get the Hugging Face API token from the environment variable
hf_token = os.getenv("HF_TOKEN")
login(token=hf_token)
# Initialize the desired model and parameters
model_name = "mistralai/Mistral-7B-Instruct-v0.1"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
# Create a text generation pipeline
pipe = pipeline(
"text-generation",
model=model,
tokenizer=tokenizer,
max_new_tokens=512,
temperature=0.7,
top_p=0.95,
repetition_penalty=1.15
)
# Create LangChain LLM wrapper
llm = HuggingFacePipeline(pipeline=pipe)
# Initialize the LangChain agent with the tool(s) and the model
agent = initialize_agent(
tools=tools,
llm=llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True
)
# -------------------------------
# Step 8: Use the Planner, Classifier, and Decision Logic
# -------------------------------
def process_question(question):
# Step 1: Planner generates the task sequence
tasks = planner(question)
print(f"Tasks to perform: {tasks}")
# Step 2: Classify the task (based on question)
task_type = task_classifier(question)
print(f"Task type: {task_type}")
# Step 3: Use the classifier and planner to decide on the next task or node
state = {"question": question, "last_response": ""}
next_task = decide_task(state)
print(f"Next task: {next_task}")
# Step 4: Use node skipper logic (skip if needed)
skip = node_skipper(state)
if skip:
print(f"Skipping to {skip}")
return skip # Or move directly to generating answer
    # Step 5: Execute task (with error handling)
    try:
        if task_type == "wiki_search":
            response = wiki_tool.run(question)
        elif task_type == "math":
            response = calc_tool.run(question)
        else:
            response = "Default answer logic"
        # Step 6: Final response formatting
        final_response = generate_final_answer(state, {task_type: response})
        return final_response
except Exception as e:
print(f"Error executing task: {e}")
return "Sorry, I encountered an error processing your request."
# Run the process
question = "How many albums did Mercedes Sosa release between 2000 and 2009?"
response = agent.invoke({"input": question})
print("Final Response:", response)
def retriever_node(state: MessagesState):
    """Retriever node using similarity scores for filtering (renamed so it does not shadow the vector-store retriever above)"""
query = state["messages"][0].content
results = vector_store.similarity_search_with_score(query, k=4) # top 4 matches
# Dynamically adjust threshold based on query complexity
threshold = 0.75 if "who" in query else 0.8
filtered = [doc for doc, score in results if score < threshold]
# Provide a default message if no documents found
if not filtered:
example_msg = HumanMessage(content="No relevant documents found.")
else:
content = "\n\n".join(doc.page_content for doc in filtered)
example_msg = HumanMessage(
content=f"Here are relevant reference documents:\n\n{content}"
)
return {"messages": [sys_msg] + state["messages"] + [example_msg]}
# ----------------------------------------------------------------
# LLM Loader
# ----------------------------------------------------------------
def get_llm(provider: str, config: dict):
if provider == "google":
from langchain_google_genai import ChatGoogleGenerativeAI
return ChatGoogleGenerativeAI(model=config["model"], temperature=config["temperature"])
elif provider == "groq":
from langchain_groq import ChatGroq
return ChatGroq(model=config["model"], temperature=config["temperature"])
elif provider == "huggingface":
from langchain_huggingface import ChatHuggingFace
from langchain_huggingface import HuggingFaceEndpoint
        return ChatHuggingFace(
            llm=HuggingFaceEndpoint(endpoint_url=config["url"], temperature=config["temperature"])
        )
else:
raise ValueError(f"Invalid provider: {provider}")
# ----------------------------------------------------------------
# Planning & Execution Logic
# ----------------------------------------------------------------
def planner(question: str) -> list:
if "calculate" in question or any(op in question for op in ["add", "subtract", "multiply", "divide", "modulus"]):
return ["math"]
elif "wiki" in question or "who is" in question.lower():
return ["wiki_search"]
else:
return ["default"]
def task_classifier(question: str) -> str:
if any(op in question.lower() for op in ["add", "subtract", "multiply", "divide", "modulus"]):
return "math"
elif "who" in question.lower() or "what is" in question.lower():
return "wiki_search"
else:
return "default"
# Function to extract math operation from the question
def extract_math_from_question(question: str):
"""Extract numbers and operator from a math question."""
match = re.search(r'(\d+)\s*(\+|\-|\*|\/|\%)\s*(\d+)', question)
if match:
num1 = int(match.group(1))
operator = match.group(2)
num2 = int(match.group(3))
return num1, operator, num2
else:
return None
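# Example: extract_math_from_question("What is 12 * 7?") returns (12, '*', 7); questions
# without an explicit "<number> <operator> <number>" pattern return None.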
def decide_task(state: dict) -> str:
return planner(state["question"])[0]
def node_skipper(state: dict) -> bool:
return False
def generate_final_answer(state: dict, task_results: dict) -> str:
if "wiki_search" in task_results:
return f"📚 Wiki Summary:\n{task_results['wiki_search']}"
elif "math" in task_results:
return f"🧮 Math Result: {task_results['math']}"
else:
return "🤖 Unable to generate a specific answer."
# ----------------------------------------------------------------
# Process Function (Main Agent Runner)
# ----------------------------------------------------------------
def process_question(question: str):
tasks = planner(question)
print(f"Tasks to perform: {tasks}")
task_type = task_classifier(question)
print(f"Task type: {task_type}")
state = {"question": question, "last_response": "", "messages": [HumanMessage(content=question)]}
next_task = decide_task(state)
print(f"Next task: {next_task}")
if node_skipper(state):
print(f"Skipping task: {next_task}")
return "Task skipped."
    try:
        if task_type == "wiki_search":
            response = wiki_tool.run(question)
        elif task_type == "math":
            # Parse "<number> <op> <number>" from the question rather than passing raw text
            parsed = extract_math_from_question(question)
            if not parsed:
                return "Could not parse a math expression from the question."
            num1, op, num2 = parsed
            op_name = {"+": "add", "-": "subtract", "*": "multiply", "/": "divide", "%": "modulus"}[op]
            response = calc_tool.invoke({"inputs": {"a": num1, "b": num2, "operation": op_name}})
        elif task_type == "retriever":
            retrieval_result = retriever_node(state)
            response = retrieval_result["messages"][-1].content
        else:
            response = "Default fallback answer."
return generate_final_answer(state, {task_type: response})
except Exception as e:
print(f"❌ Error: {e}")
return "Sorry, I encountered an error processing your request."
# Build graph function
def build_graph(provider: str, model_config: dict):
llm = get_llm(provider, model_config)
llm_with_tools = llm.bind_tools(tools)
sys_msg = SystemMessage(content="You are a helpful assistant.")
def assistant(state: MessagesState):
return {"messages": [llm_with_tools.invoke(state["messages"])]}
def retriever(state: MessagesState):
user_query = state["messages"][0].content
similar_docs = vector_store.similarity_search(user_query)
if not similar_docs:
wiki_result = wiki_tool.run(user_query)
return {
"messages": [
sys_msg,
state["messages"][0],
HumanMessage(content=f"Using Wikipedia search:\n\n{wiki_result}")
]
}
else:
return {
"messages": [
sys_msg,
state["messages"][0],
HumanMessage(content=f"Reference:\n\n{similar_docs[0].page_content}")
]
}
    def tools_condition(state: MessagesState) -> str:
        # Route to the tool node when the assistant's last message actually contains tool calls
        last_message = state["messages"][-1]
        if getattr(last_message, "tool_calls", None):
            return "tools"
        return END
builder = StateGraph(MessagesState)
builder.add_node("retriever", retriever)
builder.add_node("assistant", assistant)
builder.add_node("tools", ToolNode(tools))
builder.set_entry_point("retriever")
builder.add_edge("retriever", "assistant")
builder.add_conditional_edges("assistant", tools_condition)
builder.add_edge("tools", "assistant")
# Compile graph
return builder.compile()
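# Example (illustrative): compiling the graph and running it on a single user message,
# using the provider/model_config values loaded from config.yaml above.
#   graph = build_graph(provider, model_config)
#   result = graph.invoke({"messages": [HumanMessage(content="Who was Mercedes Sosa?")]})
#   print(result["messages"][-1].content)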