|
import os |
|
import base64 |
|
from langchain_core.messages import HumanMessage, SystemMessage |
|
from langchain_openai import ChatOpenAI |
|
from langchain_community.tools import DuckDuckGoSearchResults |
|
from langchain_community.utilities import DuckDuckGoSearchAPIWrapper |
|
import wikipediaapi |
|
import json |
|
import asyncio |
|
import aiohttp |
|
from langchain_core.tools import tool |
|
from langgraph.graph import START, StateGraph, MessagesState |
|
from langgraph.prebuilt import tools_condition |
|
from langgraph.prebuilt import ToolNode |
|
from langchain_tavily import TavilySearch |
|
|
|
import requests |
|
|
|
# Instruction prompt: forces the model to end every reply with a strict
# "FINAL ANSWER: ..." line so downstream code can parse the answer.
system_prompt = """You are a helpful assistant tasked with answering questions using a set of tools.

Now, I will ask you a question. Report your thoughts, and finish your answer with the following template:

FINAL ANSWER: [YOUR FINAL ANSWER].

YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string.

Your answer should only start with "FINAL ANSWER: ", then follows with the answer.

"""


# OpenAI API key from the environment; if unset this is None and
# ChatOpenAI will fail when first invoked.
api_key = os.getenv("OPENAI_API_KEY")


# temperature=0 keeps the agent's answers deterministic for evaluation.
model = ChatOpenAI(model="gpt-4o-mini", api_key=api_key, temperature=0)
|
|
|
@tool
def search_wiki(query: str, max_results: int = 3) -> str:
    """
    Search Wikipedia (Wikimedia REST API) for the given query.

    Args:
        query (str): The search query for Wikipedia.
        max_results (int): The maximum number of search results to
            retrieve (default is 3).

    Returns:
        str: A JSON string containing the Wikimedia search response
        (matching page titles, excerpts, and keys). On any network,
        HTTP, or decoding failure, a JSON string of the form
        {"error": "..."} is returned instead of raising, so the agent
        loop can recover gracefully.
    """
    language_code = 'en'

    # Wikimedia requires a descriptive User-Agent for API access.
    headers = {'User-Agent': 'LangGraphAgent/1.0 ([email protected])'}

    url = f'https://api.wikimedia.org/core/v1/wikipedia/{language_code}/search/page'
    parameters = {'q': query, 'limit': max_results}

    try:
        # Timeout prevents the agent from hanging forever on a slow
        # or unreachable endpoint.
        response = requests.get(url, headers=headers, params=parameters, timeout=10)
        # Surface HTTP errors (4xx/5xx) instead of trying to parse an
        # error page as a search result.
        response.raise_for_status()
        return json.dumps(response.json(), indent=2)
    except (requests.RequestException, ValueError) as exc:
        return json.dumps({"error": f"Wikipedia search failed: {exc}"}, indent=2)
|
|
|
|
|
# General-purpose web search tool: returns up to 5 Tavily results per query.
tavily_search_tool = TavilySearch(

    max_results=5,

    topic="general",

)


# Tools exposed to the LLM; the model decides when to call each one.
tools = [

    tavily_search_tool,

    search_wiki

]
|
|
|
def build_graph():
    """Compile and return the tool-calling agent graph.

    The graph alternates between an LLM node and a tool-execution node:
    the assistant either answers directly or emits tool calls, which are
    executed and appended to the transcript until no tool calls remain.
    """
    tool_calling_llm = model.bind_tools(tools)

    def assistant(state: MessagesState):
        """LLM node: run the tool-aware model over the running transcript."""
        reply = tool_calling_llm.invoke(state["messages"])
        return {"messages": [reply]}

    workflow = StateGraph(MessagesState)
    workflow.add_node("assistant", assistant)
    workflow.add_node("tools", ToolNode(tools))

    workflow.add_edge(START, "assistant")
    # Route to "tools" when the last assistant message contains tool
    # calls; otherwise the run terminates.
    workflow.add_conditional_edges("assistant", tools_condition)
    workflow.add_edge("tools", "assistant")

    return workflow.compile()
|
|
|
|
|
|
|
|
|
def run_tool_smoke_tests():
    """Manually-invoked smoke tests for the individual tools.

    The original module ran these checks at import time, which (a) fired
    live network requests on every import and (b) crashed with a
    NameError because the second check called an undefined `search_web`
    — the web search tool defined in this module is `tavily_search_tool`.
    Both checks now live here; call run_tool_smoke_tests() explicitly.
    """
    query = "Principle of double effect"

    print("--- Test Case 1: Wikipedia Search ---")
    wiki_result = search_wiki.invoke(query)
    print(f"Query: '{query}'")
    print(f"Result Type: {type(wiki_result)}")
    print(f"Result (first 500 chars): {wiki_result[:500]}...")
    print("\n")

    print("--- Test Case 2: Web Search ---")
    # TavilySearch.invoke may return a dict rather than a string, so
    # stringify before slicing for the preview.
    web_result = str(tavily_search_tool.invoke(query))
    print(f"Query: '{query}'")
    print(f"Result Type: {type(web_result)}")
    print(f"Result (first 500 chars): {web_result[:500]}...")
    print("\n")
|
|
|
|
|
if __name__ == "__main__":
    # One-shot demo: ask a factual question and print the whole trace,
    # including any intermediate tool calls and their results.
    question = "When was St. Thomas Aquinas born?"

    agent = build_graph()

    conversation = [
        SystemMessage(content=system_prompt),
        HumanMessage(content=question),
    ]

    final_state = agent.invoke({"messages": conversation})

    for message in final_state["messages"]:
        message.pretty_print()