Update agent.py
agent.py CHANGED
@@ -3,78 +3,66 @@ from dotenv import load_dotenv
from langgraph.graph import START, StateGraph, MessagesState
from langgraph.prebuilt import tools_condition
from langgraph.prebuilt import ToolNode
-from
+from duckduckgo_search import DDGS
from langchain_community.document_loaders import WikipediaLoader
from langchain_community.document_loaders import ArxivLoader
from langchain_core.messages import SystemMessage, HumanMessage
from langchain_core.tools import tool
from langchain_google_genai import ChatGoogleGenerativeAI

-# Load .env (if running locally)
load_dotenv()

-# Google API key from the environment
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")

-# --- Tools
+# --- Tools ---
@tool
def multiply(a: int, b: int) -> int:
-    """Multiplies two numbers."""
    return a * b

@tool
def add(a: int, b: int) -> int:
-    """Adds two numbers."""
    return a + b

@tool
def subtract(a: int, b: int) -> int:
-    """Subtracts two numbers."""
    return a - b

@tool
def divide(a: int, b: int) -> float:
-    """Divides two numbers."""
    if b == 0:
        raise ValueError("Cannot divide by zero.")
    return a / b

@tool
def modulo(a: int, b: int) -> int:
-    """Returns the remainder after division."""
    return a % b

@tool
def wiki_search(query: str) -> str:
-    """Search Wikipedia for a query and return up to 2 results."""
    search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
-
-        [
-            f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}">\n{doc.page_content}\n</Document>'
-            for doc in search_docs
-        ]
+    formatted = "\n\n---\n\n".join(
+        [f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}">\n{doc.page_content}\n</Document>' for doc in search_docs]
    )
-    return {"wiki_results":
+    return {"wiki_results": formatted}

@tool
def arxiv_search(query: str) -> str:
-    """Search Arxiv for a query and return up to 3 results."""
    search_docs = ArxivLoader(query=query, load_max_docs=3).load()
-
-        [
-            f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}">\n{doc.page_content[:1000]}\n</Document>'
-            for doc in search_docs
-        ]
+    formatted = "\n\n---\n\n".join(
+        [f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}">\n{doc.page_content[:1000]}\n</Document>' for doc in search_docs]
    )
-    return {"arxiv_results":
+    return {"arxiv_results": formatted}

@tool
def web_search(query: str) -> str:
-    """
-
-
-
-
+    """Searches DuckDuckGo for a query."""
+    with DDGS() as ddgs:
+        results = ddgs.text(query, max_results=5)
+    if not results:
+        return "No results found."
+    return "\n\n".join(f"{r['title']}: {r['href']}" for r in results)
+
+# --- Setup LLM and Tools ---
tools = [
    multiply,
    add,
@@ -86,7 +74,6 @@ tools = [
    web_search,
]

-# System Prompt
system_prompt = (
    "You are a highly accurate AI assistant. "
    "Use tools when needed. Be very concise and precise. "
@@ -94,7 +81,6 @@ system_prompt = (
)
sys_msg = SystemMessage(content=system_prompt)

-# --- Build Graph ---
def build_graph():
    llm = ChatGoogleGenerativeAI(
        model="gemini-2.0-flash",
@@ -106,7 +92,6 @@ def build_graph():
    llm_with_tools = llm.bind_tools(tools)

    def assistant(state: MessagesState):
-        """Assistant Node"""
        return {"messages": [llm_with_tools.invoke(state["messages"])]}

    builder = StateGraph(MessagesState)
@@ -118,7 +103,7 @@ def build_graph():

    return builder.compile()

-#
+# Agent executor for app.py
def agent_executor(question: str) -> str:
    graph = build_graph()
    messages = [HumanMessage(content=question)]
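For context, the new "# Agent executor for app.py" comment indicates that agent_executor is the entry point the Space's app.py is expected to call. The sketch below shows one plausible way app.py could wire it up; the Gradio interface, the labels, and the assumption that agent_executor returns a plain answer string are illustrative guesses and are not part of this commit.

# Hypothetical app.py wiring (assumed, not shown in this diff): expose
# agent_executor through a minimal Gradio UI.
import gradio as gr

from agent import agent_executor


def answer(question: str) -> str:
    # Delegate to the LangGraph agent defined in agent.py.
    return agent_executor(question)


demo = gr.Interface(
    fn=answer,
    inputs=gr.Textbox(label="Question"),
    outputs=gr.Textbox(label="Answer"),
    title="LangGraph Gemini agent",
)

if __name__ == "__main__":
    demo.launch()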