import os
import json

import requests
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langchain_community.tools import DuckDuckGoSearchResults
from langchain_community.utilities import DuckDuckGoSearchAPIWrapper
from langgraph.graph import START, StateGraph, MessagesState
from langgraph.prebuilt import ToolNode, tools_condition
system_prompt = """You are a helpful assistant tasked with answering questions using a set of tools.
Now, I will ask you a question. Report your thoughts, and finish your answer with the following template:
FINAL ANSWER: [YOUR FINAL ANSWER].
YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. If you are asked for a number, don't use commas to write your number nor units such as $ or percent signs unless specified otherwise. If you are asked for a string, don't use articles or abbreviations (e.g. for cities), and write digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending on whether the element to be put in the list is a number or a string.
Your answer should start with "FINAL ANSWER: ", followed by the answer.
"""
api_key = os.getenv("OPENAI_API_KEY")
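# Fail fast with an explicit message if the key is missing.
# (Small guard sketch; adapt to your own configuration handling.)
if not api_key:
    raise RuntimeError("OPENAI_API_KEY environment variable is not set")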
model = ChatOpenAI(model="gpt-4o-mini", api_key=api_key, temperature=0)
@tool
def search_wiki(query: str, max_results: int = 3) -> str:
"""
Searches Wikipedia for the given query and returns a maximum of 'max_results'
relevant article summaries, titles, and URLs.
Args:
query (str): The search query for Wikipedia.
max_results (int): The maximum number of search results to retrieve (default is 3).
Returns:
        str: A JSON string with the raw search response from the Wikimedia API,
        where each matched page includes its title, excerpt, and description.
        Returns an empty JSON list if an error occurs.
"""
    language_code = 'en'
    headers = {'User-Agent': 'LangGraphAgent/1.0 ([email protected])'}
    base_url = 'https://api.wikimedia.org/core/v1/wikipedia/'
    endpoint = '/search/page'
    url = base_url + language_code + endpoint
    parameters = {'q': query, 'limit': max_results}
    try:
        response = requests.get(url, headers=headers, params=parameters, timeout=10)
        response.raise_for_status()
        return json.dumps(response.json(), indent=2)
    except Exception as e:
        print(f"An error occurred during Wikipedia search: {e}")
        return json.dumps([])  # Return an empty JSON list on error
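# Optional post-processing sketch (not used by the agent above): reduce the raw
# Wikimedia response to title/excerpt/URL triples. This assumes the response carries
# a 'pages' list whose entries expose 'title', 'excerpt', and 'key' fields; the
# helper name is illustrative only.
def summarize_wiki_results(raw: str) -> str:
    data = json.loads(raw) if raw else []
    pages = data.get('pages', []) if isinstance(data, dict) else []
    trimmed = [
        {
            'title': page.get('title'),
            'excerpt': page.get('excerpt'),
            'url': f"https://en.wikipedia.org/wiki/{page.get('key')}",
        }
        for page in pages
    ]
    return json.dumps(trimmed, indent=2)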
@tool
def search_web(query: str, max_results: int = 3) -> str:
"""
Searches the web for the given query and returns a maximum of 'max_results'
relevant hits.
Args:
query (str): The search query for the web search.
max_results (int): The maximum number of search results to retrieve (default is 3).
Returns:
        str: A formatted string of search results, where each entry includes its
        snippet, title, and link.
        Returns an empty JSON list if no results are found or an error occurs.
"""
try:
wrapper = DuckDuckGoSearchAPIWrapper(max_results=max_results)
search = DuckDuckGoSearchResults(api_wrapper=wrapper)
results = search.invoke(query)
return results
except Exception as e:
print(f"An error occurred during web search: {e}")
return json.dumps([]) # Return an empty JSON list on error
tools = [
search_web,
search_wiki
]
def build_graph():
"""Build the graph"""
# Bind tools to LLM
llm_with_tools = model.bind_tools(tools)
# Node
def assistant(state: MessagesState):
"""Assistant node"""
return {"messages": [llm_with_tools.invoke(state["messages"])]}
builder = StateGraph(MessagesState)
builder.add_node("assistant", assistant)
builder.add_node("tools", ToolNode(tools))
    builder.add_edge(START, "assistant")
    # tools_condition routes to the "tools" node when the last AI message contains
    # tool calls, and otherwise ends the run.
    builder.add_conditional_edges(
        "assistant",
        tools_condition,
    )
    builder.add_edge("tools", "assistant")
# Compile graph
return builder.compile()
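# Optional: inspect the compiled graph topology as a Mermaid diagram. A sketch that
# assumes the installed LangGraph version exposes get_graph().draw_mermaid(); left
# commented out so it does not run on import.
# print(build_graph().get_graph().draw_mermaid())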
# --- Testing the tools ---
# Note: these checks run at import time; comment them out when importing this module elsewhere.
# Test case: Basic Wikipedia search
print("--- Test Case 1: Wikipedia Search ---")
query1 = "Principle of double effect"
result1 = search_wiki.invoke(query1)
print(f"Query: '{query1}'")
print(f"Result Type: {type(result1)}")
print(f"Result (first 500 chars): {result1[:500]}...")
print("\n")
# Test case: Basic web search
print("--- Test Case 2: Web Search ---")
query2 = "Principle of double effect"
result2 = search_web.invoke(query2)
print(f"Query: '{query2}'")
print(f"Result Type: {type(result2)}")
print(f"Result (first 500 chars): {result2[:500]}...")
print("\n")
# test agent
if __name__ == "__main__":
question = "When was St. Thomas Aquinas born?"
# Build the graph
graph = build_graph()
# Run the graph
messages = [
SystemMessage(
content=system_prompt
),
HumanMessage(
content=question
)]
messages = graph.invoke({"messages": messages})
for m in messages["messages"]:
        m.pretty_print()